Posted to commits@hive.apache.org by se...@apache.org on 2018/07/14 01:53:26 UTC

[04/20] hive git commit: HIVE-19820 : add ACID stats support to background stats updater and fix bunch of edge cases found in SU tests (Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 27d96e5..c328992 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -623,6 +623,9 @@ public interface IMetaStoreClient {
    */
   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
 
+  void truncateTable(String dbName, String tableName, List<String> partNames,
+      long txnId, String validWriteIds, long writeId) throws TException;
+
   /**
    * Truncate the table/partitions in the DEFAULT database.
    * @param catName catalog name
@@ -1641,10 +1644,14 @@ public interface IMetaStoreClient {
    * @throws MetaException something went wrong, usually in the RDBMS
    * @throws TException general thrift exception
    */
+  @Deprecated
   void alter_table_with_environmentContext(String databaseName, String tblName, Table table,
       EnvironmentContext environmentContext) throws InvalidOperationException, MetaException,
       TException;
 
+  void alter_table(String catName, String databaseName, String tblName, Table table,
+      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
+          throws InvalidOperationException, MetaException, TException;
   /**
    * Create a new database.
    * @param db database object.  If the catalog name is null it will be assumed to be
@@ -2049,6 +2056,7 @@ public interface IMetaStoreClient {
    * @throws TException
    *           if error in communicating with metastore server
    */
+  @Deprecated
   default void alter_partition(String catName, String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
     alter_partition(catName, dbName, tblName, newPart, null);
@@ -2070,9 +2078,15 @@ public interface IMetaStoreClient {
    * @throws TException
    *           if error in communicating with metastore server
    */
+  @Deprecated
   void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException;
 
+
+  void alter_partition(String dbName, String tblName, Partition newPart,
+      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException;
+
   /**
    * updates a partition to new partition
    * @param catName catalog name.
@@ -2109,6 +2123,7 @@ public interface IMetaStoreClient {
    * @throws TException
    *           if error in communicating with metastore server
    */
+  @Deprecated
   void alter_partitions(String dbName, String tblName, List<Partition> newParts)
       throws InvalidOperationException, MetaException, TException;
 
@@ -2129,6 +2144,7 @@ public interface IMetaStoreClient {
    * @throws TException
    *           if error in communicating with metastore server
    */
+  @Deprecated
   void alter_partitions(String dbName, String tblName, List<Partition> newParts,
       EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException;
@@ -2154,6 +2170,7 @@ public interface IMetaStoreClient {
    * @throws TException
    *           if error in communicating with metastore server
    */
+  @Deprecated
   default void alter_partitions(String catName, String dbName, String tblName,
                                 List<Partition> newParts)
       throws InvalidOperationException, MetaException, TException {
@@ -3736,5 +3753,4 @@ public interface IMetaStoreClient {
 
   /** Reads runtime statistics. */
   List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException;
-
 }

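For context, a minimal sketch of how a writer inside an open transaction might call the new txn-aware client overloads declared above. The txnId, writeId, and validWriteIds values are assumed to come from the caller's transaction manager; the catalog name, class, and helper method names are illustrative only, not part of this patch.

  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Table;

  class TxnAwareAlterSketch {
    static void alterWithTxnContext(IMetaStoreClient client, Table newTable,
        long txnId, long writeId, String validWriteIds) throws Exception {
      // Record the writer's write ID on the object; the metastore persists it only
      // after validating against the caller's valid write ID list.
      newTable.setWriteId(writeId);
      client.alter_table("hive" /* illustrative catalog */, newTable.getDbName(),
          newTable.getTableName(), newTable, null /* EnvironmentContext */,
          txnId, validWriteIds);

      // Truncate in the same transactional context; null partNames = whole table.
      client.truncateTable(newTable.getDbName(), newTable.getTableName(),
          null, txnId, validWriteIds, writeId);
    }
  }
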
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index f45b71f..07be1ba 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -112,6 +112,7 @@ class MetaStoreDirectSql {
 
   private static final Logger LOG = LoggerFactory.getLogger(MetaStoreDirectSql.class);
   private final PersistenceManager pm;
+  private final Configuration conf;
   private final String schema;
 
   /**
@@ -146,8 +147,10 @@ class MetaStoreDirectSql {
       SKEWED_COL_VALUE_LOC_MAP, COLUMNS_V2, PARTITION_KEYS, SERDE_PARAMS, PART_COL_STATS, KEY_CONSTRAINTS,
       TAB_COL_STATS, PARTITION_KEY_VALS, PART_PRIVS, PART_COL_PRIVS, SKEWED_STRING_LIST, CDS;
 
+
   public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String schema) {
     this.pm = pm;
+    this.conf = conf;
     this.schema = schema;
     DatabaseProduct dbType = null;
     try {
@@ -645,8 +648,8 @@ class MetaStoreDirectSql {
     + " " + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\","
     + " " + PARTITIONS + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\","
     + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\","
-    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\" "
-    + "from " + PARTITIONS + ""
+    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\", " + PARTITIONS
+    + ".\"WRITE_ID\"" + " from " + PARTITIONS + ""
     + "  left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\" "
     + "  left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + SERDES + ".\"SERDE_ID\" "
     + "where \"PART_ID\" in (" + partIds + ") order by \"PART_NAME\" asc";
@@ -697,8 +700,13 @@ class MetaStoreDirectSql {
       part.setTableName(tblName);
       if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4]));
       if (fields[5] != null) part.setLastAccessTime(extractSqlInt(fields[5]));
+      Long writeId = extractSqlLong(fields[14]);
+      if (writeId != null) {
+        part.setWriteId(writeId);
+      }
       partitions.put(partitionId, part);
 
+
       if (sdId == null) continue; // Probably a view.
       assert serdeId != null;
 
@@ -747,6 +755,7 @@ class MetaStoreDirectSql {
       serde.setSerializationLib((String)fields[13]);
       serdeSb.append(serdeId).append(",");
       sd.setSerdeInfo(serde);
+
       Deadline.checkTimeout();
     }
     query.closeAll();
@@ -2489,7 +2498,9 @@ class MetaStoreDirectSql {
    */
   private void dropPartitionsByPartitionIds(List<Object> partitionIdList) throws MetaException {
     String queryText;
-
+    if (partitionIdList.isEmpty()) {
+      return;
+    }
     String partitionIds = getIdListForIn(partitionIdList);
 
     // Get the corresponding SD_ID-s, CD_ID-s, SERDE_ID-s
@@ -2570,6 +2581,9 @@ class MetaStoreDirectSql {
    * MetaException
    */
   private void dropStorageDescriptors(List<Object> storageDescriptorIdList) throws MetaException {
+    if (storageDescriptorIdList.isEmpty()) {
+      return;
+    }
     String queryText;
     String sdIds = getIdListForIn(storageDescriptorIdList);
 
@@ -2657,6 +2671,9 @@ class MetaStoreDirectSql {
    */
   private void dropSerdes(List<Object> serdeIdList) throws MetaException {
     String queryText;
+    if (serdeIdList.isEmpty()) {
+      return;
+    }
     String serdeIds = getIdListForIn(serdeIdList);
 
     try {
@@ -2683,6 +2700,9 @@ class MetaStoreDirectSql {
    */
   private void dropDanglingColumnDescriptors(List<Object> columnDescriptorIdList)
       throws MetaException {
+    if (columnDescriptorIdList.isEmpty()) {
+      return;
+    }
     String queryText;
     String colIds = getIdListForIn(columnDescriptorIdList);
 

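The early returns added to the drop helpers above guard against rendering an empty ID list into an IN clause, which produces invalid SQL such as "... in ()". A standalone sketch of the failure mode, assuming hypothetical helper names rather than the metastore's own:

  class EmptyInClauseSketch {
    static String idListForIn(java.util.List<Object> ids) {
      StringBuilder sb = new StringBuilder();
      for (Object id : ids) {
        if (sb.length() > 0) sb.append(',');
        sb.append(id);
      }
      return sb.toString(); // "" for an empty list
    }

    static String buildDropSerdesSql(java.util.List<Object> serdeIds) {
      // Without the guard, an empty list yields "... in ()", which the RDBMS rejects;
      // the patch makes the caller return early instead.
      if (serdeIds.isEmpty()) {
        throw new IllegalArgumentException("no SERDE_IDs to drop; return early instead");
      }
      return "delete from \"SERDES\" where \"SERDE_ID\" in (" + idListForIn(serdeIds) + ")";
    }
  }
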
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index aa29dd9..b43e6f3 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1448,7 +1448,7 @@ public class ObjectStore implements RawStore, Configurable {
         if (tbl != null
             && TxnUtils.isTransactionalTable(tbl)
             && tbl.getPartitionKeysSize() == 0) {
-          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList)) {
+          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, false)) {
             tbl.setIsStatsCompliant(true);
           } else {
             tbl.setIsStatsCompliant(false);
@@ -1943,11 +1943,14 @@ public class ObjectStore implements RawStore, Configurable {
 
     t.setRewriteEnabled(mtbl.isRewriteEnabled());
     t.setCatName(mtbl.getDatabase().getCatalogName());
+    t.setWriteId(mtbl.getWriteId());
     return t;
   }
 
   private MTable convertToMTable(Table tbl) throws InvalidObjectException,
       MetaException {
+    // NOTE: we don't set writeId in this method. Write ID is only set after validating the
+    //       existing write ID against the caller's valid list.
     if (tbl == null) {
       return null;
     }
@@ -1986,9 +1989,6 @@ public class ObjectStore implements RawStore, Configurable {
         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
         tableType);
-    if (TxnUtils.isTransactionalTable(tbl)) {
-      mtable.setWriteId(tbl.getWriteId());
-    }
     return mtable;
   }
 
@@ -2450,21 +2450,24 @@ public class ObjectStore implements RawStore, Configurable {
           + part_vals.toString());
     }
     part.setValues(part_vals);
+    setPartitionStatsParam(part, table.getParameters(), mpart.getWriteId(), txnId, writeIdList);
+    return part;
+  }
+
+  private void setPartitionStatsParam(Partition part, Map<String, String> tableParams,
+      long partWriteId, long reqTxnId, String reqWriteIdList) throws MetaException {
     // If transactional table partition, check whether the current version partition
     // statistics in the metastore comply with the client query's snapshot isolation.
-    if (writeIdList != null) {
-      if (TxnUtils.isTransactionalTable(table.getParameters())) {
-        if (isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
-          part.setIsStatsCompliant(true);
-        } else {
-          part.setIsStatsCompliant(false);
-          // Do not make persistent the following state since it is query specific (not global).
-          StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
-          LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
-        }
-      }
+    if (reqWriteIdList == null) return;
+    if (!TxnUtils.isTransactionalTable(tableParams)) return;
+    if (isCurrentStatsValidForTheQuery(part, partWriteId, reqTxnId, reqWriteIdList, false)) {
+      part.setIsStatsCompliant(true);
+    } else {
+      part.setIsStatsCompliant(false);
+      // Do not make persistent the following state since it is query specific (not global).
+      StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
+      LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
     }
-    return part;
   }
 
   /**
@@ -2570,6 +2573,8 @@ public class ObjectStore implements RawStore, Configurable {
    */
   private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD)
       throws InvalidObjectException, MetaException {
+    // NOTE: we don't set writeId in this method. Write ID is only set after validating the
+    //       existing write ID against the caller's valid list.
     if (part == null) {
       return null;
     }
@@ -2597,9 +2602,6 @@ public class ObjectStore implements RawStore, Configurable {
         .getPartitionKeys()), part.getValues()), mt, part.getValues(), part
         .getCreateTime(), part.getLastAccessTime(),
         msd, part.getParameters());
-    if (TxnUtils.isTransactionalTable(mt.getParameters())) {
-      mpart.setWriteId(part.getWriteId());
-    }
     return mpart;
   }
 
@@ -2612,6 +2614,7 @@ public class ObjectStore implements RawStore, Configurable {
         mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()),
         convertMap(mpart.getParameters()));
     p.setCatName(mpart.getTable().getDatabase().getCatalogName());
+    p.setWriteId(mpart.getWriteId());
     return p;
   }
 
@@ -2624,6 +2627,7 @@ public class ObjectStore implements RawStore, Configurable {
         mpart.getCreateTime(), mpart.getLastAccessTime(),
         convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters()));
     p.setCatName(catName);
+    p.setWriteId(mpart.getWriteId());
     return p;
   }
 
@@ -4113,6 +4117,16 @@ public class ObjectStore implements RawStore, Configurable {
       // For now only alter name, owner, parameters, cols, bucketcols are allowed
       oldt.setDatabase(newt.getDatabase());
       oldt.setTableName(normalizeIdentifier(newt.getTableName()));
+      boolean isTxn = TxnUtils.isTransactionalTable(newTable);
+      if (isTxn) {
+        // If a transactional table is altered without a txn context, make sure the stats state flag is not being changed.
+        String errorMsg = verifyStatsChangeCtx(oldt.getParameters(), newTable.getParameters(),
+            newTable.getWriteId(), queryValidWriteIds, false);
+        if (errorMsg != null) {
+          throw new MetaException(errorMsg);
+        }
+      }
+      boolean isToTxn = isTxn && !TxnUtils.isTransactionalTable(oldt.getParameters());
       oldt.setParameters(newt.getParameters());
       oldt.setOwner(newt.getOwner());
       oldt.setOwnerType(newt.getOwnerType());
@@ -4135,13 +4149,16 @@ public class ObjectStore implements RawStore, Configurable {
 
       // If transactional, update MTable to have txnId and the writeIdList
       // for the current Stats updater query.
-      if (TxnUtils.isTransactionalTable(newTable) && queryValidWriteIds != null) {
+      // Don't update for conversion to acid - it doesn't modify stats but passes in queryValidWriteIds.
+      // The fact that it doesn't update stats is verified above.
+      if (isTxn && queryValidWriteIds != null && (!isToTxn || newTable.getWriteId() > 0)) {
         // Check concurrent INSERT case and set false to the flag.
-        if (!isCurrentStatsValidForTheQuery(oldt, queryTxnId, queryValidWriteIds)) {
+        if (!isCurrentStatsValidForTheQuery(oldt, queryTxnId, queryValidWriteIds, true)) {
           StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
           LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " +
                   dbname + "." + name + ". will be made persistent.");
         }
+        assert newTable.getWriteId() > 0;
         oldt.setWriteId(newTable.getWriteId());
       }
 
@@ -4154,6 +4171,32 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
+  /**
+   * Verifies that the stats JSON string is unchanged for alter table (txn stats).
+   * @return Error message with the details of the change, or null if the value has not changed.
+   */
+  private static String verifyStatsChangeCtx(Map<String, String> oldP, Map<String, String> newP,
+      long writeId, String validWriteIds, boolean isColStatsChange) {
+    if (validWriteIds != null && writeId > 0) return null; // We have txn context.
+    String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE);
+    String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE);
+    // We don't need txn context if the stats state is not being changed.
+    if (StringUtils.isEmpty(oldVal) && StringUtils.isEmpty(newVal)) return null;
+    if (StringUtils.equalsIgnoreCase(oldVal, newVal)) {
+      if (!isColStatsChange) return null; // No change in col stats or parameters => assume no change.
+      // Col stats change while json stays "valid" implies stats change. If the new value is invalid,
+      // then we don't care. This is super ugly and idiotic.
+      // It will all become better when we get rid of JSON and store a flag and write ID per stats.
+      if (!StatsSetupConst.areBasicStatsUptoDate(newP)) return null;
+    }
+    // Some change to the stats state is being made; it can only be made with a write ID.
+    // Note - we could do this:  if (writeId > 0 && (validWriteIds != null || !StatsSetupConst.areBasicStatsUptoDate(newP))) { return null;
+    //       However the only way ID list can be absent is if WriteEntity wasn't generated for the alter, which is a separate bug.
+    return "Cannot change stats state for a transactional table without providing the transactional"
+        + " write state for verification (new write ID " + writeId + ", valid write IDs "
+        + validWriteIds + "; current state " + oldVal + "; new state " + newVal + ")";
+  }
+
   @Override
   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
       throws MetaException {
@@ -4210,6 +4253,15 @@ public class ObjectStore implements RawStore, Configurable {
     }
     oldp.setValues(newp.getValues());
     oldp.setPartitionName(newp.getPartitionName());
+    boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters());
+    if (isTxn) {
+      // If a transactional table is altered without a txn context, make sure the stats state flag is not being changed.
+      String errorMsg = verifyStatsChangeCtx(oldp.getParameters(), newPart.getParameters(),
+          newPart.getWriteId(), queryValidWriteIds, false);
+      if (errorMsg != null) {
+        throw new MetaException(errorMsg);
+      }
+    }
     oldp.setParameters(newPart.getParameters());
     if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) {
       copyMSD(newp.getSd(), oldp.getSd());
@@ -4223,15 +4275,16 @@ public class ObjectStore implements RawStore, Configurable {
 
     // If transactional, add/update the MUPdaterTransaction
     // for the current updater query.
-    if (queryValidWriteIds != null && TxnUtils.isTransactionalTable(table.getParameters())) {
+    if (isTxn && queryValidWriteIds != null && newPart.getWriteId() > 0) {
       // Check concurrent INSERT case and set false to the flag.
-      if (!isCurrentStatsValidForTheQuery(oldp, queryTxnId, queryValidWriteIds)) {
+      if (!isCurrentStatsValidForTheQuery(oldp, queryTxnId, queryValidWriteIds, true)) {
         StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
         LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " +
                 dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent.");
       }
       oldp.setWriteId(newPart.getWriteId());
     }
+
     return oldCD;
   }
 
@@ -4239,7 +4292,7 @@ public class ObjectStore implements RawStore, Configurable {
   public void alterPartition(String catName, String dbname, String name, List<String> part_vals,
       Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
-    Exception e = null;
+    Throwable e = null;
     try {
       openTransaction();
       if (newPart.isSetWriteId()) {
@@ -4250,7 +4303,8 @@ public class ObjectStore implements RawStore, Configurable {
       removeUnusedColumnDescriptor(oldCd);
       // commit the changes
       success = commitTransaction();
-    } catch (Exception exception) {
+    } catch (Throwable exception) {
+      LOG.error("alterPartition failed", exception);
       e = exception;
     } finally {
       if (!success) {
@@ -4295,6 +4349,7 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
     } catch (Exception exception) {
       e = exception;
+      LOG.error("Alter failed", e);
     } finally {
       if (!success) {
         rollbackTransaction();
@@ -8354,7 +8409,8 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats)
+  public boolean updateTableColumnStatistics(ColumnStatistics colStats,
+      long txnId, String validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
 
@@ -8382,14 +8438,30 @@ public class ObjectStore implements RawStore, Configurable {
         // There is no need to add colname again, otherwise we will get duplicate colNames.
       }
 
+      // TODO## ideally the col stats state should be in colstats, not in the table!
       // Set the table properties
       // No need to check again if it exists.
       String dbname = table.getDbName();
       String name = table.getTableName();
       MTable oldt = getMTable(catName, dbname, name);
-      Map<String, String> parameters = table.getParameters();
-      StatsSetupConst.setColumnStatsState(parameters, colNames);
-      oldt.setParameters(parameters);
+      Map<String, String> newParams = new HashMap<>(table.getParameters());
+      StatsSetupConst.setColumnStatsState(newParams, colNames);
+      boolean isTxn = TxnUtils.isTransactionalTable(oldt.getParameters());
+      if (isTxn) {
+        String errorMsg = verifyStatsChangeCtx(
+            oldt.getParameters(), newParams, writeId, validWriteIds, true);
+        if (errorMsg != null) {
+          throw new MetaException(errorMsg);
+        }
+        if (!isCurrentStatsValidForTheQuery(oldt, txnId, validWriteIds, true)) {
+          // Make sure we set the flag to invalid regardless of the current value.
+          StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
+          LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table "
+              + dbname + "." + name);
+        }
+        oldt.setWriteId(writeId);
+      }
+      oldt.setParameters(newParams);
 
       committed = commitTransaction();
       return committed;
@@ -8427,8 +8499,9 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
-    throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
+      long txnId, String validWriteIds, long writeId)
+          throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
 
     try {
@@ -8460,9 +8533,26 @@ public class ObjectStore implements RawStore, Configurable {
         writeMPartitionColumnStatistics(table, partition, mStatsObj,
             oldStats.get(statsObj.getColName()));
       }
-      Map<String, String> parameters = mPartition.getParameters();
-      StatsSetupConst.setColumnStatsState(parameters, colNames);
-      mPartition.setParameters(parameters);
+      // TODO## ideally the col stats state should be in colstats, not in the partition!
+      Map<String, String> newParams = new HashMap<>(mPartition.getParameters());
+      StatsSetupConst.setColumnStatsState(newParams, colNames);
+      boolean isTxn = TxnUtils.isTransactionalTable(table);
+      if (isTxn) {
+        String errorMsg = verifyStatsChangeCtx(
+            mPartition.getParameters(), newParams, writeId, validWriteIds, true);
+        if (errorMsg != null) {
+          throw new MetaException(errorMsg);
+        }
+        if (!isCurrentStatsValidForTheQuery(mPartition, txnId, validWriteIds, true)) {
+          // Make sure we set the flag to invalid regardless of the current value.
+          StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
+          LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition "
+              + statsDesc.getDbName() + "." + statsDesc.getTableName() + "." + statsDesc.getPartName());
+        }
+        mPartition.setWriteId(writeId);
+      }
+
+      mPartition.setParameters(newParams);
       committed = commitTransaction();
       return committed;
     } finally {
@@ -8565,19 +8655,20 @@ public class ObjectStore implements RawStore, Configurable {
       List<String> colNames,
       long txnId,
       String writeIdList) throws MetaException, NoSuchObjectException {
-    Boolean iLL = null;
     // If the current stats in the metastore doesn't comply with
     // the isolation level of the query, set No to the compliance flag.
+    Boolean isCompliant = null;
     if (writeIdList != null) {
       MTable table = this.getMTable(catName, dbName, tableName);
-      iLL = isCurrentStatsValidForTheQuery(table, txnId, writeIdList);
+      isCompliant = !TxnUtils.isTransactionalTable(table.getParameters())
+        || isCurrentStatsValidForTheQuery(table, txnId, writeIdList, false);
     }
-    ColumnStatistics cS = getTableColumnStatisticsInternal(
+    ColumnStatistics stats = getTableColumnStatisticsInternal(
         catName, dbName, tableName, colNames, true, true);
-    if (cS != null && iLL != null) {
-      cS.setIsStatsCompliant(iLL);
+    if (stats != null && isCompliant != null) {
+      stats.setIsStatsCompliant(isCompliant);
     }
-    return cS;
+    return stats;
   }
 
   protected ColumnStatistics getTableColumnStatisticsInternal(
@@ -8634,30 +8725,31 @@ public class ObjectStore implements RawStore, Configurable {
       List<String> partNames, List<String> colNames,
       long txnId, String writeIdList)
       throws MetaException, NoSuchObjectException {
-
-    // If any of the current partition stats in the metastore doesn't comply with
-    // the isolation level of the query, return null.
+    if (partNames == null || partNames.isEmpty()) {
+      LOG.warn("The given partNames list is empty.");
+      return null;
+    }
+    List<ColumnStatistics> allStats = getPartitionColumnStatisticsInternal(
+        catName, dbName, tableName, partNames, colNames, true, true);
     if (writeIdList != null) {
-      if (partNames == null && partNames.isEmpty()) {
-        LOG.warn("The given partNames does not have any name.");
-        return null;
-      }
-      // TODO## this is not correct; stats updater patch will fix it to return stats for valid partitions,
-      //        and no stats for invalid. Remove this comment when merging that patch.
-      // Loop through the given "partNames" list
-      // checking isolation-level-compliance of each partition column stats.
-      for(String partName : partNames) {
-        MPartition mpart = getMPartition(catName, dbName, tableName, Warehouse.getPartValuesFromPartName(partName));
-        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
-          LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} "
-            + "(write ID {}) are not valid for current query ({} {})", dbName, tableName,
-            mpart.getPartitionName(), mpart.getWriteId(), txnId, writeIdList);
-          return Lists.newArrayList();
+      // TODO## this could be improved to get partitions in bulk
+      for (ColumnStatistics cs : allStats) {
+        MPartition mpart = getMPartition(catName, dbName, tableName,
+            Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName()));
+        if (mpart == null
+            || !isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, false)) {
+          if (mpart != null) {
+            LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} "
+              + "(write ID {}) are not valid for current query ({} {})", dbName, tableName,
+              mpart.getPartitionName(), mpart.getWriteId(), txnId, writeIdList);
+          }
+          cs.setIsStatsCompliant(false);
+        } else {
+          cs.setIsStatsCompliant(true);
         }
       }
     }
-    return getPartitionColumnStatisticsInternal(
-        catName, dbName, tableName, partNames, colNames, true, true);
+    return allStats;
   }
 
   protected List<ColumnStatistics> getPartitionColumnStatisticsInternal(
@@ -8726,7 +8818,7 @@ public class ObjectStore implements RawStore, Configurable {
       // checking isolation-level-compliance of each partition column stats.
       for(String partName : partNames) {
         MPartition mpart = getMPartition(catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName));
-        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, false)) {
           LOG.debug("The current metastore transactional partition column statistics " +
                   "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " is not valid " +
                   "for the current query.");
@@ -8891,6 +8983,8 @@ public class ObjectStore implements RawStore, Configurable {
         throw new NoSuchObjectException("Table " + tableName
             + "  for which stats deletion is requested doesn't exist");
       }
+      // Note: this does not verify ACID state; called internally when removing cols/etc.
+      //       Also called via an unused metastore API that checks for ACID tables.
       MPartition mPartition = getMPartition(catName, dbName, tableName, partVals);
       if (mPartition == null) {
         throw new NoSuchObjectException("Partition " + partName
@@ -8973,6 +9067,8 @@ public class ObjectStore implements RawStore, Configurable {
             TableName.getQualified(catName, dbName, tableName)
             + "  for which stats deletion is requested doesn't exist");
       }
+      // Note: this does not verify ACID state; called internally when removing cols/etc.
+      //       Also called via an unused metastore API that checks for ACID tables.
       query = pm.newQuery(MTableColumnStatistics.class);
       String filter;
       String parameters;
@@ -12305,10 +12401,10 @@ public class ObjectStore implements RawStore, Configurable {
    * @param queryWriteId           writeId of the query
    * @Precondition   "tbl" should be retrieved from the TBLS table.
    */
-  private boolean isCurrentStatsValidForTheQuery(
-      MTable tbl, long queryTxnId, String queryValidWriteIdList) throws MetaException {
-    return isCurrentStatsValidForTheQuery(tbl.getDatabase().getName(), tbl.getTableName(),
-        tbl.getParameters(), tbl.getWriteId(), queryTxnId, queryValidWriteIdList);
+  private boolean isCurrentStatsValidForTheQuery(MTable tbl, long queryTxnId, String queryValidWriteIdList,
+      boolean isCompleteStatsWriter) throws MetaException {
+    return isCurrentStatsValidForTheQuery(conf, tbl.getDatabase().getName(), tbl.getTableName(),
+        tbl.getParameters(), tbl.getWriteId(), queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   /**
@@ -12325,30 +12421,39 @@ public class ObjectStore implements RawStore, Configurable {
    * @param queryValidWriteIdList  valid writeId list of the query
    * @Precondition   "part" should be retrieved from the PARTITIONS table.
    */
-  private boolean isCurrentStatsValidForTheQuery(
-      MPartition part, long queryTxnId, String queryValidWriteIdList)
+  private boolean isCurrentStatsValidForTheQuery(MPartition part, long queryTxnId,
+      String queryValidWriteIdList, boolean isCompleteStatsWriter)
       throws MetaException {
-    return isCurrentStatsValidForTheQuery(part.getTable().getDatabase().getName(),
+    return isCurrentStatsValidForTheQuery(conf, part.getTable().getDatabase().getName(),
         part.getTable().getTableName(), part.getParameters(), part.getWriteId(),
-        queryTxnId, queryValidWriteIdList);
+        queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
+  }
+
+  private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId, long queryTxnId,
+      String queryValidWriteIdList, boolean isCompleteStatsWriter)
+      throws MetaException {
+    return isCurrentStatsValidForTheQuery(conf, part.getDbName(), part.getTableName(),
+        part.getParameters(), partWriteId, queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
   }
 
-  private boolean isCurrentStatsValidForTheQuery(String dbName, String tblName,
-      Map<String, String> statsParams, long statsWriteId, long queryTxnId,
-      String queryValidWriteIdList) throws MetaException {
+  // TODO: move to somewhere else
+  public static boolean isCurrentStatsValidForTheQuery(Configuration conf, String dbName,
+      String tblName, Map<String, String> statsParams, long statsWriteId, long queryTxnId,
+      String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException {
 
     // Note: can be changed to debug/info to verify the calls.
-    LOG.trace("Called with stats write ID {}; query {}, {}; params {}",
-        statsWriteId, queryTxnId, queryValidWriteIdList, statsParams);
-    // if statsWriteIdList is null,
+    // TODO## change this to debug when merging
+    LOG.info("isCurrentStatsValidForTheQuery with stats write ID {}; query {}, {}; writer: {} params {}",
+        statsWriteId, queryTxnId, queryValidWriteIdList, isCompleteStatsWriter, statsParams);
     // return true since the stats does not seem to be transactional.
+    // e.g. stats write ID 1; query 2, default.stats_part:1:9223372036854775807::;
     if (statsWriteId < 1) {
       return true;
     }
     // This COLUMN_STATS_ACCURATE(CSA) state checking also includes the case that the stats is
     // written by an aborted transaction but TXNS has no entry for the transaction
-    // after compaction.
-    if (!StatsSetupConst.areBasicStatsUptoDate(statsParams)) {
+    // after compaction. Don't check for a complete stats writer - it may replace invalid stats.
+    if (!isCompleteStatsWriter && !StatsSetupConst.areBasicStatsUptoDate(statsParams)) {
       return false;
     }
 
@@ -12359,12 +12464,22 @@ public class ObjectStore implements RawStore, Configurable {
       return true;
     }
 
-    ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList);
-    // Just check if the write ID is valid. If it's valid (i.e. we are allowed to see it),
-    // that means it cannot possibly be a concurrent write. If it's not valid (we are not
-    // allowed to see it), that means it's either concurrent or aborted, same thing for us.
-    if (list4TheQuery.isWriteIdValid(statsWriteId)) {
-      return true;
+    if (queryValidWriteIdList != null) { // Can be null when stats are being reset to invalid.
+      ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList);
+      // Just check if the write ID is valid. If it's valid (i.e. we are allowed to see it),
+      // that means it cannot possibly be a concurrent write. If it's not valid (we are not
+      // allowed to see it), that means it's either concurrent or aborted, same thing for us.
+      if (list4TheQuery.isWriteIdValid(statsWriteId)) {
+        return true;
+      }
+      // Updater is also allowed to overwrite stats from aborted txns, as long as they are not concurrent.
+      if (isCompleteStatsWriter && list4TheQuery.isWriteIdAborted(statsWriteId)) {
+        return true;
+      }
+    }
+
+    if (queryTxnId < 1) {
+      return false; // The caller is outside of a txn; no need to check the same-txn case.
     }
 
     // This assumes that all writes within the same txn are sequential and can see each other.

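The heart of the ObjectStore change is the extended isCurrentStatsValidForTheQuery check: stats written by a transactional writer are trusted only if the query's ValidWriteIdList can see that write ID, and a "complete stats writer" (the stats updater) is additionally allowed to replace stats left behind by an aborted write. A condensed sketch of that decision, omitting the same-transaction fallback and configuration handling; the method and parameter names below are illustrative, not the metastore's exact code.

  import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
  import org.apache.hadoop.hive.common.ValidWriteIdList;

  class StatsValiditySketch {
    static boolean statsValidForQuery(long statsWriteId, String queryValidWriteIdList,
        boolean isCompleteStatsWriter, boolean basicStatsUpToDate) {
      if (statsWriteId < 1) {
        return true;                 // stats were not written transactionally
      }
      // A complete stats writer may replace stats that are currently flagged invalid.
      if (!isCompleteStatsWriter && !basicStatsUpToDate) {
        return false;
      }
      if (queryValidWriteIdList == null) {
        return false;                // no snapshot to validate against (e.g. stats being reset)
      }
      ValidWriteIdList valid = new ValidReaderWriteIdList(queryValidWriteIdList);
      if (valid.isWriteIdValid(statsWriteId)) {
        return true;                 // the stats writer is visible to this query => not concurrent
      }
      // The stats updater may also overwrite stats from an aborted, non-concurrent write.
      return isCompleteStatsWriter && valid.isWriteIdAborted(statsWriteId);
    }
  }
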
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 681e1e5..95e8445 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -864,7 +864,7 @@ public interface RawStore extends Configurable {
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
    */
-  boolean updateTableColumnStatistics(ColumnStatistics colStats)
+  boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
 
   /** Persists the given column statistics object to the metastore
@@ -875,9 +875,10 @@ public interface RawStore extends Configurable {
    * @throws MetaException error accessing the RDBMS.
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
+   * @throws TException
    */
   boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
-     List<String> partVals)
+     List<String> partVals, long txnId, String validWriteIds, long writeId)
      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 8539605..9bee0db 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -547,6 +547,7 @@ public class CachedStore implements RawStore, Configurable {
         if (!table.isSetPartitionKeys()) {
           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
           Deadline.startTimer("getTableColumnStatistics");
+          // TODO## should this take write ID into account? or at least cache write ID to verify?
           ColumnStatistics tableColStats =
               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
           Deadline.stopTimer();
@@ -1598,9 +1599,9 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats)
+  public boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updateTableColumnStatistics(colStats);
+    boolean succ = rawStore.updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
     if (succ) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
@@ -1676,9 +1677,10 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
+  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
+      long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
+    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, txnId, validWriteIds, writeId);
     if (succ) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 1b4f01a..a357030 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -278,6 +278,18 @@ struct GrantRevokePrivilegeResponse {
   1: optional bool success;
 }
 
+struct TruncateTableRequest {
+  1: required string dbName,
+  2: required string tableName,
+  3: optional list<string> partNames,
+  4: optional i64 txnId=-1,
+  5: optional i64 writeId=-1,
+  6: optional string validWriteIdList
+}
+
+struct TruncateTableResponse {
+}
+
 struct Role {
   1: string roleName,
   2: i32 createTime,
@@ -571,9 +583,7 @@ struct ColumnStatisticsDesc {
 struct ColumnStatistics {
 1: required ColumnStatisticsDesc statsDesc,
 2: required list<ColumnStatisticsObj> statsObj,
-3: optional i64 txnId=-1,            // transaction id of the query that sends this structure TODO## needed?
-4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
-5: optional bool isStatsCompliant // Are the stats isolation-level-compliant with the
+3: optional bool isStatsCompliant // Are the stats isolation-level-compliant with the
                                                       // the calling query?
 }
 
@@ -591,6 +601,10 @@ struct SetPartitionsStatsRequest {
 5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
 }
 
+struct SetPartitionsStatsResponse {
+1: required bool result;
+}
+
 // schema of the table/query results etc.
 struct Schema {
  // column names, types, comments
@@ -1598,18 +1612,34 @@ struct GetRuntimeStatsRequest {
 }
 
 struct AlterPartitionsRequest {
-  1: required string dbName,
-  2: required string tableName,
-  3: required list<Partition> partitions,
-  4: required EnvironmentContext environmentContext,
-  5: optional i64 txnId=-1,
-  6: optional i64 writeId=-1,
-  7: optional string validWriteIdList
+  1: optional string catName,
+  2: required string dbName,
+  3: required string tableName,
+  4: required list<Partition> partitions,
+  5: optional EnvironmentContext environmentContext,
+  6: optional i64 txnId=-1,
+  7: optional i64 writeId=-1,
+  8: optional string validWriteIdList
 }
 
 struct AlterPartitionsResponse {
 }
 
+struct AlterTableRequest {
+  1: optional string catName,
+  2: required string dbName,
+  3: required string tableName,
+  4: required Table table,
+  5: optional EnvironmentContext environmentContext,
+  6: optional i64 txnId=-1,
+  7: optional i64 writeId=-1,
+  8: optional string validWriteIdList
+// TODO: also add cascade here, out of envCtx
+}
+
+struct AlterTableResponse {
+}
+
 // Exceptions.
 
 exception MetaException {
@@ -1754,6 +1784,7 @@ service ThriftHiveMetastore extends fb303.FacebookService
                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
   void truncate_table(1:string dbName, 2:string tableName, 3:list<string> partNames)
                           throws(1:MetaException o1)
+  TruncateTableResponse truncate_table_req(1:TruncateTableRequest req) throws(1:MetaException o1)
   list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
   list<string> get_tables_by_type(1: string db_name, 2: string pattern, 3: string tableType) throws (1: MetaException o1)
   list<string> get_materialized_views_for_rewriting(1: string db_name) throws (1: MetaException o1)
@@ -1819,6 +1850,11 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // alter table not only applies to future partitions but also cascade to existing partitions
   void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+  AlterTableResponse alter_table_req(1:AlterTableRequest req)
+      throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+
+
   // the following applies to only tables that have partitions
   // * See notes on DDL_TIME
   Partition add_partition(1:Partition new_part)
@@ -1943,7 +1979,7 @@ service ThriftHiveMetastore extends fb303.FacebookService
 
   void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
 
-  AlterPartitionsResponse alter_partitions_with_environment_context_req(1:AlterPartitionsRequest req)
+  AlterPartitionsResponse alter_partitions_req(1:AlterPartitionsRequest req)
       throws (1:InvalidOperationException o1, 2:MetaException o2)
 
   void alter_partition_with_environment_context(1:string db_name,
@@ -2012,6 +2048,12 @@ service ThriftHiveMetastore extends fb303.FacebookService
   bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
               2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
 
+  SetPartitionsStatsResponse update_table_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
+              2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+  SetPartitionsStatsResponse update_partition_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
+              2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+
+
   // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
   // such statistics exists. If the required statistics doesn't exist, get APIs throw NoSuchObjectException
   // For instance, if get_table_column_statistics is called on a partitioned table for which only

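The new request/response structs let clients pass the transactional context explicitly instead of smuggling it through EnvironmentContext properties. A minimal sketch of a caller driving the new request-based calls through the usual Thrift-generated Java bindings for the structs above; variable names and the wrapper class are assumptions, and error handling is omitted.

  import org.apache.hadoop.hive.metastore.api.AlterTableRequest;
  import org.apache.hadoop.hive.metastore.api.Table;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.hadoop.hive.metastore.api.TruncateTableRequest;

  class RequestApiSketch {
    static void alterAndTruncate(ThriftHiveMetastore.Iface client, Table newTable,
        long txnId, long writeId, String validWriteIdList) throws Exception {
      AlterTableRequest req = new AlterTableRequest();
      req.setDbName(newTable.getDbName());
      req.setTableName(newTable.getTableName());
      req.setTable(newTable);                    // carries the new write ID
      req.setTxnId(txnId);
      req.setWriteId(writeId);
      req.setValidWriteIdList(validWriteIdList);
      client.alter_table_req(req);

      TruncateTableRequest treq = new TruncateTableRequest();
      treq.setDbName(newTable.getDbName());
      treq.setTableName(newTable.getTableName());
      treq.setTxnId(txnId);
      treq.setWriteId(writeId);
      treq.setValidWriteIdList(validWriteIdList);
      client.truncate_table_req(treq);
    }
  }
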
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 106d9f2..a5e6918 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -694,18 +694,18 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.updateTableColumnStatistics(statsObj);
+    return objectStore.updateTableColumnStatistics(statsObj, txnId, validWriteIds, writeId);
   }
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
-      List<String> partVals)
+      List<String> partVals, long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
+    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, txnId, validWriteIds, writeId);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 2587a98..8270f6a 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -743,13 +743,15 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj,
+      long txnId, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
     return false;
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
+  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals,
+      long txnId, String validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException {
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 6ef416f..868a546 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1635,13 +1635,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     req.setTableName(tblName);
     req.setPartitions(newParts);
     req.setEnvironmentContext(environmentContext);
-    // TODO: this is ugly... account for ability to pass via EC for the old API.
-    if (environmentContext != null && environmentContext.isSetProperties()
-        && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
-      req.setTxnId(Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID)));
-      req.setValidWriteIdList(environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS));
-    }
-    client.alter_partitions_with_environment_context_req(req);
+    client.alter_partitions_req(req);
   }
 
   @Override
@@ -1656,7 +1650,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     req.setEnvironmentContext(environmentContext);
     req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
-    client.alter_partitions_with_environment_context_req(req);
+    client.alter_partitions_req(req);
   }
 
   @Override
@@ -3532,4 +3526,24 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     throw new UnsupportedOperationException();
   }
 
+  @Override
+  public void alter_table(String catName, String databaseName, String tblName, Table table,
+      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
+      throws InvalidOperationException, MetaException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void alter_partition(String dbName, String tblName, Partition newPart,
+      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void truncateTable(String dbName, String tableName,
+      List<String> partNames, long txnId, String validWriteIds, long writeId)
+      throws TException {
+    throw new UnsupportedOperationException();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index d9dd954..533cabb 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@ -60,7 +60,7 @@ public class TestHiveAlterHandler {
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
   }
 
   @Test
@@ -85,7 +85,7 @@ public class TestHiveAlterHandler {
     RawStore msdb = Mockito.mock(RawStore.class);
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
     Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
     );
@@ -115,7 +115,7 @@ public class TestHiveAlterHandler {
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index cb32236..75ab80b 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -244,15 +244,13 @@ public abstract class TestHiveMetaStore {
       assertNotNull("Unable to create partition " + part4, retp4);
 
       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
-      if(isThriftClient) {
-        // since we are using thrift, 'part' will not have the create time and
-        // last DDL time set since it does not get updated in the add_partition()
-        // call - likewise part2 and part3 - set it correctly so that equals check
-        // doesn't fail
-        adjust(client, part, dbName, tblName);
-        adjust(client, part2, dbName, tblName);
-        adjust(client, part3, dbName, tblName);
-      }
+      // since we are using thrift, 'part' will not have the create time and
+      // last DDL time set since it does not get updated in the add_partition()
+      // call - likewise part2 and part3 - set it correctly so that equals check
+      // doesn't fail
+      adjust(client, part, dbName, tblName, isThriftClient);
+      adjust(client, part2, dbName, tblName, isThriftClient);
+      adjust(client, part3, dbName, tblName, isThriftClient);
       assertTrue("Partitions are not same", part.equals(part_get));
 
       // check null cols schemas for a partition
@@ -383,12 +381,10 @@ public abstract class TestHiveMetaStore {
       Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
       client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));
 
-      if(isThriftClient) {
-        // do DDL time munging if thrift mode
-        adjust(client, mpart1, dbName, tblName);
-        adjust(client, mpart2, dbName, tblName);
-        adjust(client, mpart3, dbName, tblName);
-      }
+      // do DDL time munging if thrift mode
+      adjust(client, mpart1, dbName, tblName, isThriftClient);
+      adjust(client, mpart2, dbName, tblName, isThriftClient);
+      adjust(client, mpart3, dbName, tblName, isThriftClient);
       verifyPartitionsPublished(client, dbName, tblName,
           Arrays.asList(mvals1.get(0)),
           Arrays.asList(mpart1,mpart2,mpart3));
@@ -418,10 +414,8 @@ public abstract class TestHiveMetaStore {
       // add_partitions(5) : ok
       client.add_partitions(Arrays.asList(mpart5));
 
-      if(isThriftClient) {
-        // do DDL time munging if thrift mode
-        adjust(client, mpart5, dbName, tblName);
-      }
+      // do DDL time munging if thrift mode
+      adjust(client, mpart5, dbName, tblName, isThriftClient);
 
       verifyPartitionsPublished(client, dbName, tblName,
           Arrays.asList(mvals1.get(0)),
@@ -1976,12 +1970,17 @@ public abstract class TestHiveMetaStore {
   }
 
   private static void adjust(HiveMetaStoreClient client, Partition part,
-      String dbName, String tblName) throws TException {
+      String dbName, String tblName, boolean isThriftClient) throws TException {
     Partition part_get = client.getPartition(dbName, tblName, part.getValues());
-    part.setCreateTime(part_get.getCreateTime());
-    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
+    if (isThriftClient) {
+      part.setCreateTime(part_get.getCreateTime());
+      part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
+    }
+    part.setWriteId(part_get.getWriteId());
   }
 
+
+
   private static void silentDropDatabase(String dbName) throws TException {
     try {
       for (String tableName : client.getTables(dbName, "*")) {

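The adjust() change above illustrates a pattern that recurs in several of the client tests that follow: the Partition fetched back from the metastore can carry a write id that the locally built object lacks, so the write id has to be copied over before an equality check can pass. A minimal sketch, with illustrative names; only the getPartition/setWriteId/getWriteId calls are taken from the patch itself.

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;
    import org.junit.Assert;

    class WriteIdSyncCheck {
      // Copy the server-side write id onto the locally built Partition before
      // comparing, so equals() is not tripped up by the new field.
      static void assertSamePartition(HiveMetaStoreClient client, String dbName,
          String tblName, Partition localPart) throws TException {
        Partition fetched = client.getPartition(dbName, tblName, localPart.getValues());
        localPart.setWriteId(fetched.getWriteId());
        Assert.assertEquals(fetched, localPart);
      }
    }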
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index fb4a761..fe64a91 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -244,6 +244,7 @@ public class TestMetaStoreEventListener {
     Assert.assertTrue(partEvent.getStatus());
     Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
     Partition partAdded = partEvent.getPartitionIterator().next();
+    partAdded.setWriteId(part.getWriteId());
     validateAddPartition(part, partAdded);
     validateTableInAddPartition(tbl, partEvent.getTable());
     validateAddPartition(part, prePartEvent.getPartitions().get(0));

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index c40d45d..995271a 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -559,7 +559,7 @@ public class TestObjectStore {
         ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
         statsObjList.add(partStats);
 
-        objectStore.updatePartitionColumnStatistics(stats, part.getValues());
+        objectStore.updatePartitionColumnStatistics(stats, part.getValues(), -1, null, -1);
       }
     }
     if (withPrivileges) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 717c5ee..01a8f81 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -175,7 +175,7 @@ public class TestOldSchema {
       data.setLongStats(dcsd);
       obj.setStatsData(data);
       cs.addToStatsObj(obj);
-      store.updatePartitionColumnStatistics(cs, partVal);
+      store.updatePartitionColumnStatistics(cs, partVal, -1, null, -1);
 
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 7cf5c26..e4854f9 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -455,7 +455,7 @@ public class TestCachedStore {
     stats.setStatsObj(colStatObjs);
 
     // Save to DB
-    objectStore.updateTableColumnStatistics(stats);
+    objectStore.updateTableColumnStatistics(stats, -1, null, -1);
 
     // Prewarm CachedStore
     CachedStore.setCachePrewarmedState(false);
@@ -720,8 +720,8 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);
@@ -790,10 +790,10 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
 
     longStats.setNumDVs(40);
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);
@@ -871,7 +871,7 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
 
     longStats.setNumDVs(40);
     hll = HyperLogLog.builder().build();
@@ -881,7 +881,7 @@ public class TestCachedStore {
     hll.addLong(5);
     longStats.setBitVectors(hll.serialize());
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);

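Across TestObjectStore, TestOldSchema and TestCachedStore the stats-update calls gain the same trailing arguments. Judging by the truncateTable signature earlier in this patch, these are presumably txnId, validWriteIds and writeId, with -1 / null / -1 meaning the update is not tied to any transaction; the parameter names are inferred here, not quoted from the RawStore interface.

    // Assumed meaning of the trailing arguments (txnId, validWriteIds, writeId):
    // -1 / null / -1 = write the stats outside of any transactional context.
    objectStore.updateTableColumnStatistics(stats, -1, null, -1);
    objectStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);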
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 9b9b101..2ec20c2 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -410,11 +410,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partition("", TABLE_NAME, partitions.get(3));
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionNullDbName() throws Exception {
     createTable4PartColsParts(client);
     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
-    client.alter_partition(null, TABLE_NAME, partitions.get(3));
+    try {
+      client.alter_partition(null, TABLE_NAME, partitions.get(3));
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -424,11 +428,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partition(DB_NAME, "", partitions.get(3));
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionNullTblName() throws Exception {
     createTable4PartColsParts(client);
     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
-    client.alter_partition(DB_NAME, null, partitions.get(3));
+    try {
+      client.alter_partition(DB_NAME, null, partitions.get(3));
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test
@@ -536,11 +544,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partition("", TABLE_NAME, partitions.get(3), new EnvironmentContext());
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionWithEnvironmentCtxNullDbName() throws Exception {
     createTable4PartColsParts(client);
     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
-    client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
+    try {
+      client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -550,11 +562,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partition(DB_NAME, "", partitions.get(3), new EnvironmentContext());
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionWithEnvironmentCtxNullTblName() throws Exception {
     createTable4PartColsParts(client);
     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
-    client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
+    try {
+      client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test
@@ -680,11 +696,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part));
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsNullDbName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
+    try {
+      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -853,11 +873,15 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsWithEnvironmentCtxNullDbName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+    try {
+      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test(expected = InvalidOperationException.class)

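The null-db/null-table tests above switch from @Test(expected = MetaException.class) to an explicit try/catch that accepts either MetaException or TProtocolException. A plausible reading, consistent with the embedded-vs-remote comments and the TTransportException to TProtocolException change near the end of this patch, is that the embedded metastore still raises MetaException while a remote Thrift client can fail first when serializing the null field. The shared pattern is:

    try {
      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
      Assert.fail("Expected exception");
    } catch (MetaException | TProtocolException ex) {
      // Expected: either exception type is acceptable, depending on whether the
      // metastore is embedded or remote.
    }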
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
index 8ce8531..462584a 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
@@ -123,6 +123,7 @@ public class TestAppendPartitions extends MetaStoreClientTest {
     Assert.assertNotNull(appendedPart);
     Partition partition =
         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
+    appendedPart.setWriteId(partition.getWriteId());
     Assert.assertEquals(partition, appendedPart);
     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
@@ -141,6 +142,7 @@ public class TestAppendPartitions extends MetaStoreClientTest {
     Assert.assertNotNull(appendedPart);
     Partition partition =
         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
+    appendedPart.setWriteId(partition.getWriteId());
     Assert.assertEquals(partition, appendedPart);
     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may"));
@@ -294,6 +296,7 @@ public class TestAppendPartitions extends MetaStoreClientTest {
     Assert.assertNotNull(appendedPart);
     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
         getPartitionValues(partitionName));
+    appendedPart.setWriteId(partition.getWriteId());
     Assert.assertEquals(partition, appendedPart);
     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
@@ -312,6 +315,7 @@ public class TestAppendPartitions extends MetaStoreClientTest {
     Assert.assertNotNull(appendedPart);
     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
         getPartitionValues(partitionName));
+    appendedPart.setWriteId(partition.getWriteId());
     Assert.assertEquals(partition, appendedPart);
     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
     verifyPartitionNames(table, Lists.newArrayList(partitionName));
@@ -475,12 +479,14 @@ public class TestAppendPartitions extends MetaStoreClientTest {
     Assert.assertEquals("a1", created.getValues().get(0));
     Partition fetched =
         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+    created.setWriteId(fetched.getWriteId());
     Assert.assertEquals(created, fetched);
 
     created = client.appendPartition(catName, dbName, tableName, "partcol=a2");
     Assert.assertEquals(1, created.getValuesSize());
     Assert.assertEquals("a2", created.getValues().get(0));
     fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+    created.setWriteId(fetched.getWriteId());
     Assert.assertEquals(created, fetched);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index efa3e7c..c1674bf 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -237,6 +237,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
     // Reset the parameters, so we can compare
     table.setParameters(createdTable.getParameters());
     table.setCreationMetadata(createdTable.getCreationMetadata());
+    table.setWriteId(createdTable.getWriteId());
     Assert.assertEquals("create/get table data", table, createdTable);
 
     // Check that the directory is created
@@ -703,6 +704,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
     // Some of the data is set on the server side, so reset those
     newTable.setCreateTime(alteredTable.getCreateTime());
     newTable.setCreationMetadata(alteredTable.getCreationMetadata());
+    newTable.setWriteId(alteredTable.getWriteId());
     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
   }
 
@@ -916,13 +918,18 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterTableNullTableNameInNew() throws Exception {
     Table originalTable = testTables[0];
     Table newTable = originalTable.deepCopy();
     newTable.setTableName(null);
 
-    client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+    try {
+      client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+      // Expected.
+    }
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -951,20 +958,28 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterTableNullDatabase() throws Exception {
     Table originalTable = testTables[0];
     Table newTable = originalTable.deepCopy();
-
-    client.alter_table(null, originalTable.getTableName(), newTable);
+    try {
+      client.alter_table(null, originalTable.getTableName(), newTable);
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterTableNullTableName() throws Exception {
     Table originalTable = testTables[0];
     Table newTable = originalTable.deepCopy();
 
-    client.alter_table(originalTable.getDbName(), null, newTable);
+    try {
+      client.alter_table(originalTable.getDbName(), null, newTable);
+      Assert.fail("Expected exception");
+    } catch (MetaException | TProtocolException ex) {
+      // Expected.
+    }
   }
 
   @Test
@@ -977,7 +992,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
       Assert.fail("Expected a NullPointerException or TTransportException to be thrown");
     } catch (NullPointerException exception) {
       // Expected exception - Embedded MetaStore
-    } catch (TTransportException exception) {
+    } catch (TProtocolException exception) {
       // Expected exception - Remote MetaStore
     }
   }