Posted to commits@hive.apache.org by pv...@apache.org on 2023/01/15 09:32:13 UTC

[hive] branch branch-2.3 updated: HIVE-26882: Allow transactional check of Table parameter before altering the Table (#3888) (Peter Vary reviewed by Prasanth Jayachandran and Szehon Ho) (#3947)

This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
     new eaea1f0e1e8 HIVE-26882: Allow transactional check of Table parameter before altering the Table (#3888) (Peter Vary reviewed by Prasanth Jayachandran and Szehon Ho) (#3947)
eaea1f0e1e8 is described below

commit eaea1f0e1e84f9f66ba9f08cbe5d3c491d5f14c7
Author: pvary <pe...@gmail.com>
AuthorDate: Sun Jan 15 10:32:06 2023 +0100

    HIVE-26882: Allow transactional check of Table parameter before altering the Table (#3888) (Peter Vary reviewed by Prasanth Jayachandran and Szehon Ho) (#3947)
    
    * HIVE-17981 Create a set of builders for Thrift classes.  This closes #274.  (Alan Gates, reviewed by Peter Vary)
    
    * HIVE-18355: Add builder for metastore Thrift classes missed in the first pass - FunctionBuilder (Peter Vary, reviewed by Alan Gates)
    
    * HIVE-18372: Create testing infra to test different HMS instances (Peter Vary, reviewed by Marta Kuczora, Vihang Karajgaonkar and Adam Szita)
    
    * HIVE-26882: Allow transactional check of Table parameter before altering the Table (#3888) (Peter Vary reviewed by Prasanth Jayachandran and Szehon Ho)
    
    Co-authored-by: Alan Gates <ga...@hortonworks.com>
    Co-authored-by: Peter Vary <pv...@cloudera.com>
    Co-authored-by: Peter Vary <pe...@apple.com>
---
 .../hcatalog/listener/DummyRawStoreFailEvent.java  |   5 +
 metastore/if/hive_metastore.thrift                 |   3 +
 .../thrift/gen-cpp/hive_metastore_constants.cpp    |   4 +
 .../gen/thrift/gen-cpp/hive_metastore_constants.h  |   2 +
 .../metastore/api/hive_metastoreConstants.java     |   4 +
 .../src/gen/thrift/gen-php/metastore/Types.php     |  10 +
 .../gen/thrift/gen-py/hive_metastore/constants.py  |   2 +
 .../gen/thrift/gen-rb/hive_metastore_constants.rb  |   4 +
 .../metastore/DefaultPartitionExpressionProxy.java |  56 ++
 .../hadoop/hive/metastore/HiveAlterHandler.java    |  20 +-
 .../apache/hadoop/hive/metastore/ObjectStore.java  |  28 +-
 .../org/apache/hadoop/hive/metastore/RawStore.java |  14 +-
 .../client/builder/ConstraintBuilder.java          |  98 ++++
 .../metastore/client/builder/DatabaseBuilder.java  |  88 +++
 .../metastore/client/builder/FunctionBuilder.java  | 115 ++++
 .../GrantRevokePrivilegeRequestBuilder.java        |  63 +++
 .../client/builder/HiveObjectPrivilegeBuilder.java |  63 +++
 .../client/builder/HiveObjectRefBuilder.java       |  64 +++
 .../metastore/client/builder/IndexBuilder.java     | 104 ++++
 .../metastore/client/builder/PartitionBuilder.java | 102 ++++
 .../client/builder/PrivilegeGrantInfoBuilder.java  |  83 +++
 .../hive/metastore/client/builder/RoleBuilder.java |  55 ++
 .../client/builder/SQLForeignKeyBuilder.java       |  83 +++
 .../client/builder/SQLPrimaryKeyBuilder.java       |  42 ++
 .../client/builder/StorageDescriptorBuilder.java   | 210 +++++++
 .../metastore/client/builder/TableBuilder.java     | 155 +++++
 .../hadoop/hive/metastore/hbase/HBaseStore.java    |   6 +
 .../hadoop/hive/metastore/utils/SecurityUtils.java | 313 +++++++++++
 .../metastore/DummyRawStoreControlledCommit.java   |   5 +
 .../metastore/DummyRawStoreForJdoConnection.java   |   6 +
 .../metastore/client/MetaStoreFactoryForTests.java | 107 ++++
 .../hive/metastore/client/TestDatabases.java       | 622 +++++++++++++++++++++
 .../client/TestTablesCreateDropAlterTruncate.java  | 232 ++++++++
 .../minihms/AbstractMetaStoreService.java          | 153 +++++
 .../minihms/ClusterMetaStoreForTests.java          |  33 ++
 .../minihms/EmbeddedMetaStoreForTests.java         |  34 ++
 .../hadoop/hive/metastore/minihms/MiniHMS.java     |  69 +++
 .../metastore/minihms/RemoteMetaStoreForTests.java |  41 ++
 38 files changed, 3090 insertions(+), 8 deletions(-)
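
For context, the headline change lets a caller guard an alter-table call against concurrent
modification of a single table parameter. A minimal client-side sketch (not part of the patch,
assuming an IMetaStoreClient handle named client and a hypothetical "writer_version" parameter):

    // Read the table and remember the current value of the guarded parameter.
    Table table = client.getTable(dbName, tableName);
    String current = table.getParameters().get("writer_version");
    // Declare the expected state; HiveAlterHandler aborts the alter if it no longer matches.
    EnvironmentContext context = new EnvironmentContext();
    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_KEY, "writer_version");
    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE, current);
    Table newTable = table.deepCopy();
    newTable.getParameters().put("writer_version", String.valueOf(Long.parseLong(current) + 1));
    // Throws MetaException if another writer changed "writer_version" since it was read.
    client.alter_table_with_environmentContext(dbName, tableName, newTable, context);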

diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 9871083ac74..a5cc94e846c 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -119,6 +119,11 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
     return objectStore.openTransaction();
   }
 
+  @Override
+  public boolean openTransaction(String isolationLevel) {
+    return objectStore.openTransaction(isolationLevel);
+  }
+
   @Override
   public void rollbackTransaction() {
     objectStore.rollbackTransaction();
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 9df27319ffa..161fd8c5261 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -1548,3 +1548,6 @@ const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",
 const string TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties",
 
 
+// Keys for alter table environment context parameters
+const string EXPECTED_PARAMETER_KEY = "expected_parameter_key",
+const string EXPECTED_PARAMETER_VALUE = "expected_parameter_value",
\ No newline at end of file
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
index 1cbd176597b..a24bfd86f8d 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
@@ -59,6 +59,10 @@ hive_metastoreConstants::hive_metastoreConstants() {
 
   TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties";
 
+  EXPECTED_PARAMETER_KEY = "expected_parameter_key";
+
+  EXPECTED_PARAMETER_VALUE = "expected_parameter_value";
+
 }
 
 }}} // namespace
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
index 3d068c3ec9e..3798cdd9d9c 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
@@ -39,6 +39,8 @@ class hive_metastoreConstants {
   std::string TABLE_IS_TRANSACTIONAL;
   std::string TABLE_NO_AUTO_COMPACT;
   std::string TABLE_TRANSACTIONAL_PROPERTIES;
+  std::string EXPECTED_PARAMETER_KEY;
+  std::string EXPECTED_PARAMETER_VALUE;
 };
 
 extern const hive_metastoreConstants g_hive_metastore_constants;
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
index 8de8896bff4..f54726a01e6 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
@@ -84,4 +84,8 @@ public class hive_metastoreConstants {
 
   public static final String TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties";
 
+  public static final String EXPECTED_PARAMETER_KEY = "expected_parameter_key";
+
+  public static final String EXPECTED_PARAMETER_VALUE = "expected_parameter_value";
+
 }
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index d81ff27e178..a82ee5e8e88 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -20239,6 +20239,8 @@ final class Constant extends \Thrift\Type\TConstant {
   static protected $TABLE_IS_TRANSACTIONAL;
   static protected $TABLE_NO_AUTO_COMPACT;
   static protected $TABLE_TRANSACTIONAL_PROPERTIES;
+  static protected $EXPECTED_PARAMETER_KEY;
+  static protected $EXPECTED_PARAMETER_VALUE;
 
   static protected function init_DDL_TIME() {
     return "transient_lastDdlTime";
@@ -20335,6 +20337,14 @@ final class Constant extends \Thrift\Type\TConstant {
   static protected function init_TABLE_TRANSACTIONAL_PROPERTIES() {
     return "transactional_properties";
   }
+
+  static protected function init_EXPECTED_PARAMETER_KEY() {
+    return "expected_parameter_key";
+  }
+
+  static protected function init_EXPECTED_PARAMETER_VALUE() {
+    return "expected_parameter_value";
+  }
 }
 
 
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
index 5100236afa2..0e16bb3e787 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
@@ -33,3 +33,5 @@ META_TABLE_STORAGE = "storage_handler"
 TABLE_IS_TRANSACTIONAL = "transactional"
 TABLE_NO_AUTO_COMPACT = "no_auto_compaction"
 TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"
+EXPECTED_PARAMETER_KEY = "expected_parameter_key"
+EXPECTED_PARAMETER_VALUE = "expected_parameter_value"
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
index 6aa7143c76b..ad6cd226bb7 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
@@ -55,3 +55,7 @@ TABLE_NO_AUTO_COMPACT = %q"no_auto_compaction"
 
 TABLE_TRANSACTIONAL_PROPERTIES = %q"transactional_properties"
 
+EXPECTED_PARAMETER_KEY = %q"expected_parameter_key"
+
+EXPECTED_PARAMETER_VALUE = %q"expected_parameter_value"
+
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
new file mode 100644
index 00000000000..2b4a8f099ab
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
@@ -0,0 +1,56 @@
+package org.apache.hadoop.hive.metastore;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+
+import java.util.List;
+
+/**
+ * Default implementation of PartitionExpressionProxy.  Eventually this should use the SARGs in
+ * Hive's storage-api.  For now it just throws UnsupportedOperationException.
+ */
+public class DefaultPartitionExpressionProxy implements PartitionExpressionProxy {
+    @Override
+    public String convertExprToFilter(byte[] expr) throws MetaException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean filterPartitionsByExpr(List<String> partColumnNames, List<PrimitiveTypeInfo> partColumnTypeInfos, byte[] expr, String defaultPartitionName, List<String> partitionNames) throws MetaException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public FileMetadataExprType getMetadataType(String inputFormat) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public SearchArgument createSarg(byte[] expr) {
+        throw new UnsupportedOperationException();
+    }
+}
\ No newline at end of file
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 3fefe5502b8..971c23f751a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hive.common.util.HiveStringUtils;
 
+import javax.jdo.Constants;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
@@ -126,7 +127,18 @@ public class HiveAlterHandler implements AlterHandler {
     }
 
     try {
-      msdb.openTransaction();
+      String expectedKey = environmentContext != null && environmentContext.getProperties() != null ?
+              environmentContext.getProperties().get(hive_metastoreConstants.EXPECTED_PARAMETER_KEY) : null;
+      String expectedValue = environmentContext != null && environmentContext.getProperties() != null ?
+              environmentContext.getProperties().get(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE) : null;
+
+      if (expectedKey != null) {
+        // If we have to check the expected state of the table we have to prevent nonrepeatable reads.
+        msdb.openTransaction(Constants.TX_REPEATABLE_READ);
+      } else {
+        msdb.openTransaction();
+      }
+
       name = name.toLowerCase();
       dbname = dbname.toLowerCase();
 
@@ -146,6 +158,12 @@ public class HiveAlterHandler implements AlterHandler {
         throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist");
       }
 
+      if (expectedKey != null && expectedValue != null
+              && !expectedValue.equals(oldt.getParameters().get(expectedKey))) {
+        throw new MetaException("The table has been modified. The parameter value for key '" + expectedKey + "' is '"
+                + oldt.getParameters().get(expectedKey) + "'. The expected value was '" + expectedValue + "'");
+      }
+
       // Views derive the column type from the base table definition.  So the view definition
       // can be altered to change the column types.  The column type compatibility checks should
       // be done only for non-views.
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index bb69d07db89..eab348f76f3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -561,17 +561,31 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   /**
-   * Opens a new one or the one already created Every call of this function must
-   * have corresponding commit or rollback function call
+   * Opens a new one or the one already created. Every call of this function must
+   * have corresponding commit or rollback function call.
    *
    * @return an active transaction
    */
-
   @Override
   public boolean openTransaction() {
+    return openTransaction(null);
+  }
+
+  /**
+   * Opens a new one or the one already created. Every call of this function must
+   * have corresponding commit or rollback function call.
+   *
+   * @param isolationLevel The transaction isolation level. Only possible to set on the first call.
+   * @return an active transaction
+   */
+  @Override
+  public boolean openTransaction(String isolationLevel) {
     openTrasactionCalls++;
     if (openTrasactionCalls == 1) {
       currentTransaction = pm.currentTransaction();
+      if (isolationLevel != null) {
+        currentTransaction.setIsolationLevel(isolationLevel);
+      }
       currentTransaction.begin();
       transactionStatus = TXN_STATUS.OPEN;
     } else {
@@ -581,10 +595,16 @@ public class ObjectStore implements RawStore, Configurable {
         throw new RuntimeException("openTransaction called in an interior"
             + " transaction scope, but currentTransaction is not active.");
       }
+
+      // Can not change the isolation level on an already open transaction
+      if (isolationLevel != null && !isolationLevel.equals(currentTransaction.getIsolationLevel())) {
+        throw new RuntimeException("Can not set isolation level on an open transaction");
+      }
     }
 
     boolean result = currentTransaction.isActive();
-    debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
+    debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result + ", isolationLevel = "
+            + currentTransaction.getIsolationLevel());
     return result;
   }
 
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 639a1d8d60c..3e50db451f9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -77,13 +77,21 @@ public interface RawStore extends Configurable {
   public abstract void shutdown();
 
   /**
-   * Opens a new one or the one already created Every call of this function must
-   * have corresponding commit or rollback function call
+   * Opens a new one or the one already created. Every call of this function must
+   * have corresponding commit or rollback function call.
    *
    * @return an active transaction
    */
+  boolean openTransaction();
 
-  public abstract boolean openTransaction();
+  /**
+   * Opens a new one or the one already created. Every call of this function must
+   * have corresponding commit or rollback function call.
+   *
+   * @param isolationLevel The transaction isolation level. Only possible to set on the first call.
+   * @return an active transaction
+   */
+  boolean openTransaction(String isolationLevel);
 
   /**
    * if this is the commit of the first open call then an actual commit is
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
new file mode 100644
index 00000000000..50e779a22bf
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+/**
+ * Base builder for all types of constraints.  Database name, table name, and column name
+ * must be provided.
+ * @param <T> Type of builder extending this.
+ */
+abstract class ConstraintBuilder<T> {
+  protected String dbName, tableName, columnName, constraintName;
+  protected int keySeq;
+  protected boolean enable, validate, rely;
+  private T child;
+
+  protected ConstraintBuilder() {
+    keySeq = 1;
+    enable = true;
+    validate = rely = false;
+  }
+
+  protected void setChild(T child) {
+    this.child = child;
+  }
+
+  protected void checkBuildable(String defaultConstraintName) throws MetaException {
+    if (dbName == null || tableName == null || columnName == null) {
+      throw new MetaException("You must provide database name, table name, and column name");
+    }
+    if (constraintName == null) {
+      constraintName = dbName + "_" + tableName + "_" + columnName + "_" + defaultConstraintName;
+    }
+  }
+
+  public T setDbName(String dbName) {
+    this.dbName = dbName;
+    return child;
+  }
+
+  public T setTableName(String tableName) {
+    this.tableName = tableName;
+    return child;
+  }
+
+  public T setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return child;
+  }
+
+  public T setColumnName(String columnName) {
+    this.columnName = columnName;
+    return child;
+  }
+
+  public T setConstraintName(String constraintName) {
+    this.constraintName = constraintName;
+    return child;
+  }
+
+  public T setKeySeq(int keySeq) {
+    this.keySeq = keySeq;
+    return child;
+  }
+
+  public T setEnable(boolean enable) {
+    this.enable = enable;
+    return child;
+  }
+
+  public T setValidate(boolean validate) {
+    this.validate = validate;
+    return child;
+  }
+
+  public T setRely(boolean rely) {
+    this.rely = rely;
+    return child;
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
new file mode 100644
index 00000000000..99addedc162
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A builder for {@link Database}.  The name of the new database is required.  Everything else
+ * selects reasonable defaults.
+ */
+public class DatabaseBuilder {
+  private String name, description, location;
+  private Map<String, String> params = new HashMap<>();
+  private String ownerName;
+  private PrincipalType ownerType;
+
+  public DatabaseBuilder setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  public DatabaseBuilder setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public DatabaseBuilder setLocation(String location) {
+    this.location = location;
+    return this;
+  }
+
+  public DatabaseBuilder setParams(Map<String, String> params) {
+    this.params = params;
+    return this;
+  }
+
+  public DatabaseBuilder addParam(String key, String value) {
+    params.put(key, value);
+    return this;
+  }
+
+  public DatabaseBuilder setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+    return this;
+  }
+
+  public DatabaseBuilder setOwnerType(PrincipalType ownerType) {
+    this.ownerType = ownerType;
+    return this;
+  }
+
+  public Database build() throws TException {
+    if (name == null) throw new MetaException("You must name the database");
+    Database db = new Database(name, description, location, params);
+    try {
+      if (ownerName == null) ownerName = SecurityUtils.getUser();
+      db.setOwnerName(ownerName);
+      if (ownerType == null) ownerType = PrincipalType.USER;
+      db.setOwnerType(ownerType);
+      return db;
+    } catch (IOException e) {
+      throw new MetaException(e.getMessage());
+    }
+  }
+}
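
A short usage sketch for the builder (illustrative; the name, description, and location are
made up):

    Database db = new DatabaseBuilder()
        .setName("test_db")
        .setDescription("database used by the HMS client tests")
        .setLocation("file:///tmp/test_db")
        .build(); // throws MetaException if no name was set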
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
new file mode 100644
index 00000000000..378b56b3792
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.FunctionType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.ResourceUri;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for creating Thrift Function objects for tests and API usage.
+ */
+public class FunctionBuilder {
+  private String dbName = "default";
+  private String funcName = null;
+  private String className = null;
+  private String owner = null;
+  private PrincipalType ownerType;
+  private int createTime;
+  private FunctionType funcType;
+  private List<ResourceUri> resourceUris;
+
+  public FunctionBuilder() {
+    // Set some reasonable defaults
+    ownerType = PrincipalType.USER;
+    createTime = (int) (System.currentTimeMillis() / 1000);
+    funcType = FunctionType.JAVA;
+    resourceUris = new ArrayList<ResourceUri>();
+  }
+
+  public FunctionBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public FunctionBuilder setDbName(Database db) {
+    this.dbName = db.getName();
+    return this;
+  }
+
+  public FunctionBuilder setName(String funcName) {
+    this.funcName = funcName;
+    return this;
+  }
+
+  public FunctionBuilder setClass(String className) {
+    this.className = className;
+    return this;
+  }
+
+  public FunctionBuilder setOwner(String owner) {
+    this.owner = owner;
+    return this;
+  }
+
+  public FunctionBuilder setOwnerType(PrincipalType ownerType) {
+    this.ownerType = ownerType;
+    return this;
+  }
+
+  public FunctionBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public FunctionBuilder setFunctionType(FunctionType funcType) {
+    this.funcType = funcType;
+    return this;
+  }
+
+  public FunctionBuilder setResourceUris(List<ResourceUri> resourceUris) {
+    this.resourceUris = resourceUris;
+    return this;
+  }
+
+  public FunctionBuilder addResourceUri(ResourceUri resourceUri) {
+    this.resourceUris.add(resourceUri);
+    return this;
+  }
+
+  public Function build() throws MetaException {
+    try {
+      if (owner == null) {
+        owner = SecurityUtils.getUser();
+      }
+    } catch (IOException e) {
+      throw new MetaException(e.getMessage());
+    }
+    return new Function(funcName, dbName, className, owner, ownerType, createTime, funcType,
+        resourceUris);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
new file mode 100644
index 00000000000..26cea191e16
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
+import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+
+/**
+ * A builder for {@link GrantRevokePrivilegeRequest}.  The revoke of grant option defaults to
+ * false.  The request type and the privileges must be provided.
+ */
+public class GrantRevokePrivilegeRequestBuilder {
+  private GrantRevokeType requestType;
+  private PrivilegeBag privileges;
+  private boolean revokeGrantOption;
+
+  public GrantRevokePrivilegeRequestBuilder() {
+    privileges = new PrivilegeBag();
+    revokeGrantOption = false;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder setRequestType(GrantRevokeType requestType) {
+    this.requestType = requestType;
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder setRevokeGrantOption(boolean revokeGrantOption) {
+    this.revokeGrantOption = revokeGrantOption;
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder addPrivilege(HiveObjectPrivilege privilege) {
+    privileges.addToPrivileges(privilege);
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequest build() throws MetaException {
+    if (requestType == null || privileges.getPrivilegesSize() == 0) {
+      throw new MetaException("The request type and at least one privilege must be provided.");
+    }
+    GrantRevokePrivilegeRequest rqst = new GrantRevokePrivilegeRequest(requestType, privileges);
+    if (revokeGrantOption) rqst.setRevokeGrantOption(revokeGrantOption);
+    return rqst;
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
new file mode 100644
index 00000000000..d802e1a971a
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+
+/**
+ * Builder for {@link HiveObjectPrivilege}.  All values must be set.
+ */
+public class HiveObjectPrivilegeBuilder {
+  private HiveObjectRef hiveObjectRef;
+  private String principleName;
+  private PrincipalType principalType;
+  private PrivilegeGrantInfo grantInfo;
+
+  public HiveObjectPrivilegeBuilder setHiveObjectRef(HiveObjectRef hiveObjectRef) {
+    this.hiveObjectRef = hiveObjectRef;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setPrincipleName(String principleName) {
+    this.principleName = principleName;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setPrincipalType(PrincipalType principalType) {
+    this.principalType = principalType;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setGrantInfo(PrivilegeGrantInfo grantInfo) {
+    this.grantInfo = grantInfo;
+    return this;
+  }
+
+  public HiveObjectPrivilege build() throws MetaException {
+    if (hiveObjectRef == null || principleName == null || principalType == null ||
+        grantInfo == null) {
+      throw new MetaException("hive object reference, principal name and type, and grant info " +
+          "must all be provided");
+    }
+    return new HiveObjectPrivilege(hiveObjectRef, principleName, principalType, grantInfo);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
new file mode 100644
index 00000000000..94c871354d2
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A builder for {@link HiveObjectRef}.  Unlike most builders (which allow a gradual building up
+ * of the values) this gives a number of methods that take the object to be referenced and then
+ * build the appropriate reference.  This is intended primarily for use with
+ * {@link HiveObjectPrivilegeBuilder}
+ */
+public class HiveObjectRefBuilder {
+  private HiveObjectType objectType;
+  private String dbName, objectName, columnName;
+  private List<String> partValues;
+
+  public HiveObjectRef buildGlobalReference() {
+    return new HiveObjectRef(HiveObjectType.GLOBAL, null, null, new ArrayList<String>(), null);
+  }
+
+  public HiveObjectRef buildDatabaseReference(Database db) {
+    return new
+        HiveObjectRef(HiveObjectType.DATABASE, db.getName(), null, new ArrayList<String>(), null);
+  }
+
+  public HiveObjectRef buildTableReference(Table table) {
+    return new HiveObjectRef(HiveObjectType.TABLE, table.getDbName(), table.getTableName(),
+        new ArrayList<String>(), null);
+  }
+
+  public HiveObjectRef buildPartitionReference(Partition part) {
+    return new HiveObjectRef(HiveObjectType.PARTITION, part.getDbName(), part.getTableName(),
+        part.getValues(), null);
+  }
+
+  public HiveObjectRef buildColumnReference(Table table, String columnName) {
+    return new HiveObjectRef(HiveObjectType.TABLE, table.getDbName(), table.getTableName(),
+        new ArrayList<String>(), columnName);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
new file mode 100644
index 00000000000..6c8b1d80a90
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Builder for indices.  You must supply the database name and table name (or table reference), a
+ * name for the index, and whatever StorageDescriptorBuilder requires.  All other fields will be
+ * given reasonable defaults.
+ */
+public class IndexBuilder extends StorageDescriptorBuilder<IndexBuilder> {
+  private String dbName, tableName, indexName, indexTableName, handlerClass;
+  private int createTime, lastAccessTime;
+  private Map<String, String> indexParams;
+  private boolean deferredRebuild;
+
+  public IndexBuilder() {
+    // Set some reasonable defaults
+    indexParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    super.setChild(this);
+  }
+
+  public IndexBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public IndexBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public IndexBuilder setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return this;
+  }
+
+  public IndexBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public IndexBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public IndexBuilder setIndexParams(Map<String, String> indexParams) {
+    this.indexParams = indexParams;
+    return this;
+  }
+
+  public IndexBuilder setIndexName(String indexName) {
+    this.indexName = indexName;
+    return this;
+  }
+
+  public IndexBuilder setIndexTableName(String indexTableName) {
+    this.indexTableName = indexTableName;
+    return this;
+  }
+
+  public IndexBuilder setHandlerClass(String handlerClass) {
+    this.handlerClass = handlerClass;
+    return this;
+  }
+
+  public IndexBuilder setDeferredRebuild(boolean deferredRebuild) {
+    this.deferredRebuild = deferredRebuild;
+    return this;
+  }
+
+  public Index build() throws MetaException {
+    if (dbName == null || tableName == null || indexName == null) {
+      throw new MetaException("You must provide database name, table name, and index name");
+    }
+    if (indexTableName == null) indexTableName = tableName + "_" + indexName + "_table";
+    return new Index(indexName, handlerClass, dbName, tableName, createTime, lastAccessTime,
+        indexTableName, buildSd(), indexParams, deferredRebuild);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
new file mode 100644
index 00000000000..265625f95ca
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builder for {@link Partition}.  The only requirements are 1. (database name and table name) or table
+ * reference; 2. partition values; 3. whatever {@link StorageDescriptorBuilder} requires.
+ */
+public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder> {
+  private String dbName, tableName;
+  private int createTime, lastAccessTime;
+  private Map<String, String> partParams;
+  private List<String> values;
+
+  public PartitionBuilder() {
+    // Set some reasonable defaults
+    partParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    super.setChild(this);
+  }
+
+  public PartitionBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public PartitionBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public PartitionBuilder setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return this;
+  }
+
+  public PartitionBuilder setValues(List<String> values) {
+    this.values = values;
+    return this;
+  }
+
+  public PartitionBuilder addValue(String value) {
+    if (values == null) values = new ArrayList<>();
+    values.add(value);
+    return this;
+  }
+
+  public PartitionBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public PartitionBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public PartitionBuilder setPartParams(Map<String, String> partParams) {
+    this.partParams = partParams;
+    return this;
+  }
+
+  public PartitionBuilder addPartParam(String key, String value) {
+    if (partParams == null) partParams = new HashMap<>();
+    partParams.put(key, value);
+    return this;
+  }
+
+  public Partition build() throws MetaException {
+    if (dbName == null || tableName == null) {
+      throw new MetaException("database name and table name must be provided");
+    }
+    if (values == null) throw new MetaException("You must provide partition values");
+    return new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(),
+        partParams);
+  }
+}
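
A usage sketch (illustrative): partition values and data columns are both required. The column
setter is inherited from StorageDescriptorBuilder, whose listing at the end of this message is
truncated, so the addCol signature here is an assumption:

    Partition part = new PartitionBuilder()
        .setDbAndTableName(table) // table is an existing Table object
        .addValue("2023-01-15")   // one value per partition column
        .addCol("id", "int")      // assumed StorageDescriptorBuilder setter
        .build();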
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java
new file mode 100644
index 00000000000..213798e262d
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+
+import java.io.IOException;
+
+/**
+ * Builder for {@link PrivilegeGrantInfo}.  The privilege is required.  If not provided,
+ * the grantor is assumed to be the current user.  This is really intended for use by the
+ * {@link HiveObjectPrivilegeBuilder}.
+ */
+public class PrivilegeGrantInfoBuilder {
+  private String privilege, grantor;
+  private int createTime;
+  private PrincipalType grantorType;
+  private boolean grantOption;
+
+  public PrivilegeGrantInfoBuilder() {
+    createTime = (int)(System.currentTimeMillis() / 1000);
+    grantOption = false;
+  }
+
+  public PrivilegeGrantInfoBuilder setPrivilege(String privilege) {
+    this.privilege = privilege;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantor(String grantor) {
+    this.grantor = grantor;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantorType(PrincipalType grantorType) {
+    this.grantorType = grantorType;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantOption(boolean grantOption) {
+    this.grantOption = grantOption;
+    return this;
+  }
+
+  public PrivilegeGrantInfo build() throws MetaException {
+    if (privilege == null) {
+      throw new MetaException("Privilege must be provided.");
+    }
+    if (grantor == null) {
+      try {
+        grantor = SecurityUtils.getUser();
+        grantorType = PrincipalType.USER;
+      } catch (IOException e) {
+        throw new MetaException(e.getMessage());
+      }
+    }
+    return new PrivilegeGrantInfo(privilege, createTime, grantor, grantorType, grantOption);
+  }
+}
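
Combined with HiveObjectRefBuilder and HiveObjectPrivilegeBuilder above, a complete privilege
grant can be assembled like this (illustrative sketch; the Table object and user name are
assumed to exist):

    PrivilegeGrantInfo grantInfo = new PrivilegeGrantInfoBuilder()
        .setPrivilege("SELECT")
        .build(); // grantor defaults to the current user
    HiveObjectRef ref = new HiveObjectRefBuilder().buildTableReference(table);
    HiveObjectPrivilege privilege = new HiveObjectPrivilegeBuilder()
        .setHiveObjectRef(ref)
        .setPrincipleName("test_user")
        .setPrincipalType(PrincipalType.USER)
        .setGrantInfo(grantInfo)
        .build();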
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java
new file mode 100644
index 00000000000..0b8d189f31a
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Role;
+
+/**
+ * A builder for {@link Role}.  The roleName and the ownerName must be provided.
+ */
+public class RoleBuilder {
+  private String roleName, ownerName;
+  private int createTime;
+
+  public RoleBuilder() {
+    createTime = (int)(System.currentTimeMillis() / 1000);
+  }
+
+  public RoleBuilder setRoleName(String roleName) {
+    this.roleName = roleName;
+    return this;
+  }
+
+  public RoleBuilder setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+    return this;
+  }
+
+  public RoleBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public Role build() throws MetaException {
+    if (roleName == null || ownerName == null) {
+      throw new MetaException("role name and owner name must be provided.");
+    }
+    return new Role(roleName, createTime, ownerName);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
new file mode 100644
index 00000000000..a39319a1e4d
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+
+/**
+ * Builder for {@link SQLForeignKey}.  Requires what {@link ConstraintBuilder} requires,
+ * plus the primary key database, table, column, and name.
+ */
+public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder> {
+  private String pkDb, pkTable, pkColumn, pkName;
+  private int updateRule, deleteRule;
+
+  public SQLForeignKeyBuilder() {
+    updateRule = deleteRule = 0;
+    super.setChild(this);
+  }
+
+  public SQLForeignKeyBuilder setPkDb(String pkDb) {
+    this.pkDb = pkDb;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkTable(String pkTable) {
+    this.pkTable = pkTable;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkColumn(String pkColumn) {
+    this.pkColumn = pkColumn;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkName(String pkName) {
+    this.pkName = pkName;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPrimaryKey(SQLPrimaryKey pk) {
+    pkDb = pk.getTable_db();
+    pkTable = pk.getTable_name();
+    pkColumn = pk.getColumn_name();
+    pkName = pk.getPk_name();
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setUpdateRule(int updateRule) {
+    this.updateRule = updateRule;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setDeleteRule(int deleteRule) {
+    this.deleteRule = deleteRule;
+    return this;
+  }
+
+  public SQLForeignKey build() throws MetaException {
+    checkBuildable("foreign_key");
+    if (pkDb == null || pkTable == null || pkColumn == null || pkName == null) {
+      throw new MetaException("You must provide the primary key database, table, column, and name");
+    }
+    return new SQLForeignKey(pkDb, pkTable, pkColumn, dbName, tableName, columnName, keySeq,
+        updateRule, deleteRule, constraintName, pkName, enable, validate, rely);
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
new file mode 100644
index 00000000000..9000f861676
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+
+/**
+ * Builder for {@link SQLPrimaryKey}.  Only requires what {@link ConstraintBuilder} requires.
+ */
+public class SQLPrimaryKeyBuilder extends ConstraintBuilder<SQLPrimaryKeyBuilder> {
+
+  public SQLPrimaryKeyBuilder() {
+    super.setChild(this);
+  }
+
+  // Alias for setConstraintName, using primary key terminology.
+  public SQLPrimaryKeyBuilder setPrimaryKeyName(String name) {
+    return setConstraintName(name);
+  }
+
+  public SQLPrimaryKey build() throws MetaException {
+    checkBuildable("primary_key");
+    return new SQLPrimaryKey(dbName, tableName, columnName, keySeq, constraintName, enable,
+        validate, rely);
+  }
+}
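
A usage sketch (illustrative; the Table object and column name are made up). When no
constraint name is set, checkBuildable derives a default from the database, table, and
column names:

    SQLPrimaryKey pk = new SQLPrimaryKeyBuilder()
        .setDbAndTableName(table)
        .setColumnName("id")
        .build(); // constraint name defaults to "<db>_<table>_id_primary_key"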
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
new file mode 100644
index 00000000000..39d1fa210ce
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builds a {@link StorageDescriptor}.  Only requires that columns be set.  It picks reasonable
+ * defaults for everything else.  It is intended for use by builders of objects that contain a
+ * StorageDescriptor, not for direct use.
+ */
+abstract class StorageDescriptorBuilder<T> {
+  private static final String SERDE_LIB = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
+  private static final String INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveInputFormat";
+  private static final String OUTPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveOutputFormat";
+
+  private String location, inputFormat, outputFormat, serdeName, serdeLib;
+  private List<FieldSchema> cols;
+  private int numBuckets;
+  private Map<String, String> storageDescriptorParams, serdeParams;
+  private boolean compressed, storedAsSubDirectories;
+  private List<String> bucketCols, skewedColNames;
+  private List<Order> sortCols;
+  private List<List<String>> skewedColValues;
+  private Map<List<String>, String> skewedColValueLocationMaps;
+  // This enables us to return the correct type from the builder
+  private T child;
+
+  protected StorageDescriptorBuilder() {
+    // Set some reasonable defaults
+    storageDescriptorParams = new HashMap<>();
+    serdeParams = new HashMap<>();
+    bucketCols = new ArrayList<>();
+    sortCols = new ArrayList<>();
+    numBuckets = 0;
+    compressed = false;
+    inputFormat = INPUT_FORMAT;
+    outputFormat = OUTPUT_FORMAT;
+    serdeLib = SERDE_LIB;
+    skewedColNames = new ArrayList<>();
+    skewedColValues = new ArrayList<>();
+    skewedColValueLocationMaps = new HashMap<>();
+  }
+
+  protected StorageDescriptor buildSd() throws MetaException {
+    if (cols == null) throw new MetaException("You must provide the columns");
+    SerDeInfo serdeInfo = new SerDeInfo(serdeName, serdeLib, serdeParams);
+    StorageDescriptor sd = new StorageDescriptor(cols, location, inputFormat, outputFormat,
+        compressed, numBuckets, serdeInfo, bucketCols, sortCols, storageDescriptorParams);
+    sd.setStoredAsSubDirectories(storedAsSubDirectories);
+    if (skewedColNames != null) {
+      SkewedInfo skewed = new SkewedInfo(skewedColNames, skewedColValues,
+          skewedColValueLocationMaps);
+      sd.setSkewedInfo(skewed);
+    }
+    return sd;
+  }
+
+  protected void setChild(T child) {
+    this.child = child;
+  }
+
+  public T setLocation(String location) {
+    this.location = location;
+    return child;
+  }
+
+  public T setInputFormat(String inputFormat) {
+    this.inputFormat = inputFormat;
+    return child;
+  }
+
+  public T setOutputFormat(String outputFormat) {
+    this.outputFormat = outputFormat;
+    return child;
+  }
+
+  public T setSerdeName(String serdeName) {
+    this.serdeName = serdeName;
+    return child;
+  }
+
+  public T setSerdeLib(String serdeLib) {
+    this.serdeLib = serdeLib;
+    return child;
+  }
+  public T setCols(List<FieldSchema> cols) {
+    this.cols = cols;
+    return child;
+  }
+
+  public T addCol(String name, String type, String comment) {
+    if (cols == null) cols = new ArrayList<>();
+    cols.add(new FieldSchema(name, type, comment));
+    return child;
+  }
+
+  public T addCol(String name, String type) {
+    return addCol(name, type, "");
+  }
+
+  public T setNumBuckets(int numBuckets) {
+    this.numBuckets = numBuckets;
+    return child;
+  }
+
+  public T setStorageDescriptorParams(
+      Map<String, String> storageDescriptorParams) {
+    this.storageDescriptorParams = storageDescriptorParams;
+    return child;
+  }
+
+  public T addStorageDescriptorParam(String key, String value) {
+    if (storageDescriptorParams == null) storageDescriptorParams = new HashMap<>();
+    storageDescriptorParams.put(key, value);
+    return child;
+  }
+
+  public T setSerdeParams(Map<String, String> serdeParams) {
+    this.serdeParams = serdeParams;
+    return child;
+  }
+
+  public T addSerdeParam(String key, String value) {
+    if (serdeParams == null) serdeParams = new HashMap<>();
+    serdeParams.put(key, value);
+    return child;
+  }
+
+  public T setCompressed(boolean compressed) {
+    this.compressed = compressed;
+    return child;
+  }
+
+  public T setStoredAsSubDirectories(boolean storedAsSubDirectories) {
+    this.storedAsSubDirectories = storedAsSubDirectories;
+    return child;
+  }
+
+  public T setBucketCols(List<String> bucketCols) {
+    this.bucketCols = bucketCols;
+    return child;
+  }
+
+  public T addBucketCol(String bucketCol) {
+    if (bucketCols == null) bucketCols = new ArrayList<>();
+    bucketCols.add(bucketCol);
+    return child;
+  }
+
+  public T setSkewedColNames(List<String> skewedColNames) {
+    this.skewedColNames = skewedColNames;
+    return child;
+  }
+
+  public T addSkewedColName(String skewedColName) {
+    if (skewedColNames == null) skewedColNames = new ArrayList<>();
+    skewedColNames.add(skewedColName);
+    return child;
+  }
+
+  public T setSortCols(List<Order> sortCols) {
+    this.sortCols = sortCols;
+    return child;
+  }
+
+  public T addSortCol(String col, int order) {
+    if (sortCols == null) sortCols = new ArrayList<>();
+    sortCols.add(new Order(col, order));
+    return child;
+  }
+
+  // It is not at all clear how to flatten these last two out in a useful way, and no one uses
+  // these anyway.
+  public T setSkewedColValues(List<List<String>> skewedColValues) {
+    this.skewedColValues = skewedColValues;
+    return child;
+  }
+
+  public T setSkewedColValueLocationMaps(
+      Map<List<String>, String> skewedColValueLocationMaps) {
+    this.skewedColValueLocationMaps = skewedColValueLocationMaps;
+    return child;
+  }
+}
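
A note on the generic parameter (not part of the patch): this is the self-typed
builder pattern. Each concrete subclass registers itself through setChild() so
that the inherited setters chain with the subclass type rather than the abstract
base type. A stripped-down sketch of the shape, with placeholder names:

    abstract class Base<T> {
      private T child;
      protected void setChild(T child) { this.child = child; }
      public T setLocation(String location) { /* store it */ return child; }
    }

    class ConcreteBuilder extends Base<ConcreteBuilder> {
      ConcreteBuilder() { setChild(this); }
      // new ConcreteBuilder().setLocation("...") now returns ConcreteBuilder,
      // so subclass-specific setters can follow in the same chain.
    }
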
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
new file mode 100644
index 00000000000..0b005858c06
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Build a {@link Table}.  The database name and table name must be provided, plus whatever is
+ * needed by the underlying {@link StorageDescriptorBuilder}.
+ */
+public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
+  private String dbName, tableName, owner, viewOriginalText, viewExpandedText, type;
+  private List<FieldSchema> partCols;
+  private int createTime, lastAccessTime, retention;
+  private Map<String, String> tableParams;
+  private boolean rewriteEnabled, temporary;
+
+  public TableBuilder() {
+    // Set some reasonable defaults
+    tableParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    retention = 0;
+    super.setChild(this);
+  }
+
+  public TableBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public TableBuilder setDbName(Database db) {
+    this.dbName = db.getName();
+    return this;
+  }
+
+  public TableBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public TableBuilder setOwner(String owner) {
+    this.owner = owner;
+    return this;
+  }
+
+  public TableBuilder setViewOriginalText(String viewOriginalText) {
+    this.viewOriginalText = viewOriginalText;
+    return this;
+  }
+
+  public TableBuilder setViewExpandedText(String viewExpandedText) {
+    this.viewExpandedText = viewExpandedText;
+    return this;
+  }
+
+  public TableBuilder setType(String type) {
+    this.type = type;
+    return this;
+  }
+
+  public TableBuilder setPartCols(List<FieldSchema> partCols) {
+    this.partCols = partCols;
+    return this;
+  }
+
+  public TableBuilder addPartCol(String name, String type, String comment) {
+    if (partCols == null) partCols = new ArrayList<>();
+    partCols.add(new FieldSchema(name, type, comment));
+    return this;
+  }
+
+  public TableBuilder addPartCol(String name, String type) {
+    return addPartCol(name, type, "");
+  }
+
+  public TableBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public TableBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public TableBuilder setRetention(int retention) {
+    this.retention = retention;
+    return this;
+  }
+
+  public TableBuilder setTableParams(Map<String, String> tableParams) {
+    this.tableParams = tableParams;
+    return this;
+  }
+
+  public TableBuilder addTableParam(String key, String value) {
+    if (tableParams == null) tableParams = new HashMap<>();
+    tableParams.put(key, value);
+    return this;
+  }
+
+  public TableBuilder setRewriteEnabled(boolean rewriteEnabled) {
+    this.rewriteEnabled = rewriteEnabled;
+    return this;
+  }
+
+  public TableBuilder setTemporary(boolean temporary) {
+    this.temporary = temporary;
+    return this;
+  }
+
+  public Table build() throws MetaException {
+    if (dbName == null || tableName == null) {
+      throw new MetaException("You must set the database and table name");
+    }
+    if (owner == null) {
+      try {
+        owner = SecurityUtils.getUser();
+      } catch (IOException e) {
+        throw new MetaException(e.getMessage());
+      }
+    }
+    Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, buildSd(),
+        partCols, tableParams, viewOriginalText, viewExpandedText, type);
+    if (rewriteEnabled) t.setRewriteEnabled(true);
+    if (temporary) t.setTemporary(temporary);
+    return t;
+  }
+
+}
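
Example usage (not part of the patch; the names are illustrative). Only the
database name, the table name, and at least one column are required; the owner
falls back to SecurityUtils.getUser() and the storage descriptor fields fall
back to the defaults in StorageDescriptorBuilder:

    Table t = new TableBuilder()
        .setDbName("test_db")
        .setTableName("test_table")
        .addCol("id", "int")
        .addCol("value", "string", "payload column")
        .addPartCol("ds", "string")
        .build();
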
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 83cefb73d19..17f296066e6 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -130,6 +130,12 @@ public class HBaseStore implements RawStore {
     return true;
   }
 
+
+  @Override
+  public boolean openTransaction(String isolationLevel) {
+    throw new UnsupportedOperationException("Not supported");
+  }
+
   @Override
   public boolean commitTransaction() {
     if (--txnNestLevel == 0) {
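
The new openTransaction(String isolationLevel) overload lets a caller ask the
RawStore for a transaction with a specific isolation level; HBaseStore opts out
by throwing UnsupportedOperationException. A hedged sketch of the caller-side
pattern, using only RawStore methods shown in this patch (the isolation level
string itself is an assumption, not taken from the patch):

    boolean committed = false;
    rawStore.openTransaction("SERIALIZABLE");  // hypothetical isolation level name
    try {
      // re-read the object and verify the guarded state here
      committed = rawStore.commitTransaction();
    } finally {
      if (!committed) {
        rawStore.rollbackTransaction();
      }
    }
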
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
new file mode 100644
index 00000000000..77ef4543413
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.thrift.DBTokenStore;
+import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
+import org.apache.hadoop.hive.thrift.MemoryTokenStore;
+import org.apache.hadoop.hive.thrift.ZooKeeperTokenStore;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+
+import org.apache.thrift.transport.TSSLTransportFactory;
+import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.SSLParameters;
+import javax.net.ssl.SSLServerSocket;
+import javax.net.ssl.SSLSocket;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
+import javax.security.auth.login.LoginException;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class SecurityUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(SecurityUtils.class);
+
+    public static UserGroupInformation getUGI() throws LoginException, IOException {
+        String doAs = System.getenv("HADOOP_USER_NAME");
+        if (doAs != null && doAs.length() > 0) {
+            /*
+             * this allows doAs (proxy user) to be passed along across process boundary where
+             * delegation tokens are not supported.  For example, a DDL stmt via WebHCat with
+             * a doAs parameter, forks to 'hcat' which needs to start a Session that
+             * proxies the end user
+             */
+            return UserGroupInformation.createProxyUser(doAs, UserGroupInformation.getLoginUser());
+        }
+        return UserGroupInformation.getCurrentUser();
+    }
+    /**
+     * Dynamically sets up the JAAS configuration that uses kerberos
+     * @param principal the Kerberos principal to log in as
+     * @param keyTabFile path to the keytab file for the principal
+     * @throws IOException
+     */
+    public static void setZookeeperClientKerberosJaasConfig(String principal, String keyTabFile) throws IOException {
+        // ZooKeeper property name to pick the correct JAAS conf section
+        final String SASL_LOGIN_CONTEXT_NAME = "HiveZooKeeperClient";
+        System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, SASL_LOGIN_CONTEXT_NAME);
+
+        principal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0");
+        JaasConfiguration jaasConf = new JaasConfiguration(SASL_LOGIN_CONTEXT_NAME, principal, keyTabFile);
+
+        // Install the Configuration in the runtime.
+        javax.security.auth.login.Configuration.setConfiguration(jaasConf);
+    }
+
+    /**
+     * A JAAS configuration for ZooKeeper clients intended to use for SASL
+     * Kerberos.
+     */
+    private static class JaasConfiguration extends javax.security.auth.login.Configuration {
+        // Current installed Configuration
+        private static final boolean IBM_JAVA = System.getProperty("java.vendor")
+                .contains("IBM");
+        private final javax.security.auth.login.Configuration baseConfig = javax.security.auth.login.Configuration
+                .getConfiguration();
+        private final String loginContextName;
+        private final String principal;
+        private final String keyTabFile;
+
+        public JaasConfiguration(String hiveLoginContextName, String principal, String keyTabFile) {
+            this.loginContextName = hiveLoginContextName;
+            this.principal = principal;
+            this.keyTabFile = keyTabFile;
+        }
+
+        @Override
+        public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
+            if (loginContextName.equals(appName)) {
+                Map<String, String> krbOptions = new HashMap<String, String>();
+                if (IBM_JAVA) {
+                    krbOptions.put("credsType", "both");
+                    krbOptions.put("useKeytab", keyTabFile);
+                } else {
+                    krbOptions.put("doNotPrompt", "true");
+                    krbOptions.put("storeKey", "true");
+                    krbOptions.put("useKeyTab", "true");
+                    krbOptions.put("keyTab", keyTabFile);
+                }
+                krbOptions.put("principal", principal);
+                krbOptions.put("refreshKrb5Config", "true");
+                AppConfigurationEntry hiveZooKeeperClientEntry = new AppConfigurationEntry(
+                        KerberosUtil.getKrb5LoginModuleName(), LoginModuleControlFlag.REQUIRED, krbOptions);
+                return new AppConfigurationEntry[] { hiveZooKeeperClientEntry };
+            }
+            // Try the base config
+            if (baseConfig != null) {
+                return baseConfig.getAppConfigurationEntry(appName);
+            }
+            return null;
+        }
+    }
+
+    /**
+     * Get the string form of the token given a token signature. The signature is used as the value of
+     * the "service" field in the token for lookup. Ref: AbstractDelegationTokenSelector in Hadoop. If
+     * there exists such a token in the token cache (credential store) of the job, the lookup returns
+     * that. This is relevant only when running against a "secure" hadoop release. The method gets hold
+     * of the tokens if they are set up by hadoop - this should happen on the map/reduce tasks if the
+     * client added the tokens into hadoop's credential store in the front end during job submission.
+     * The method will select the hive delegation token among the set of tokens and return the string
+     * form of it.
+     *
+     * @param tokenSignature signature used as the service field when looking up the token
+     * @return the string form of the token found
+     * @throws IOException
+     */
+    public static String getTokenStrForm(String tokenSignature) throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
+
+        Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
+                tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
+        return token != null ? token.encodeToUrlString() : null;
+    }
+
+    /**
+     * Create a delegation token object for the given token string and service. Add the token to given
+     * UGI
+     *
+     * @param ugi the UGI to add the token to
+     * @param tokenStr string form of the token
+     * @param tokenService service name to set on the token
+     * @throws IOException
+     */
+    public static void setTokenStr(UserGroupInformation ugi, String tokenStr, String tokenService)
+            throws IOException {
+        Token<DelegationTokenIdentifier> delegationToken = createToken(tokenStr, tokenService);
+        ugi.addToken(delegationToken);
+    }
+
+    /**
+     * Create a new token using the given string and service
+     *
+     * @param tokenStr string form of the token
+     * @param tokenService service name to set on the token
+     * @return the decoded delegation token
+     * @throws IOException
+     */
+    private static Token<DelegationTokenIdentifier> createToken(String tokenStr, String tokenService)
+            throws IOException {
+        Token<DelegationTokenIdentifier> delegationToken = new Token<DelegationTokenIdentifier>();
+        delegationToken.decodeFromUrlString(tokenStr);
+        delegationToken.setService(new Text(tokenService));
+        return delegationToken;
+    }
+
+    private static final String DELEGATION_TOKEN_STORE_CLS = "hive.cluster.delegation.token.store.class";
+
+    /**
+     * This method should be used to return the metastore specific tokenstore class name to
+     * maintain backwards compatibility
+     *
+     * @param conf - Configuration object
+     * @return the token store class name from the configuration. It maps the hive specific token
+     *         store class name to the metastore module specific class name. For example: if
+     *         hive.cluster.delegation.token.store.class is set to
+     *         org.apache.hadoop.hive.thrift.MemoryTokenStore, it returns the equivalent token store
+     *         class defined in the metastore module, which is
+     *         org.apache.hadoop.hive.metastore.security.MemoryTokenStore. Similarly,
+     *         org.apache.hadoop.hive.thrift.DBTokenStore maps to
+     *         org.apache.hadoop.hive.metastore.security.DBTokenStore and
+     *         org.apache.hadoop.hive.thrift.ZooKeeperTokenStore maps to
+     *         org.apache.hadoop.hive.metastore.security.ZooKeeperTokenStore
+     */
+    public static String getTokenStoreClassName(Configuration conf) {
+        String tokenStoreClass = conf.get(DELEGATION_TOKEN_STORE_CLS, "");
+        if (StringUtils.isBlank(tokenStoreClass)) {
+            // default tokenstore is MemoryTokenStore
+            return MemoryTokenStore.class.getName();
+        }
+        switch (tokenStoreClass) {
+            case "org.apache.hadoop.hive.thrift.DBTokenStore":
+                return DBTokenStore.class.getName();
+            case "org.apache.hadoop.hive.thrift.MemoryTokenStore":
+                return MemoryTokenStore.class.getName();
+            case "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore":
+                return ZooKeeperTokenStore.class.getName();
+            default:
+                return tokenStoreClass;
+        }
+    }
+
+
+    /**
+     * @return the user name from the HADOOP_USER_NAME environment variable, if set, or the current user
+     * @throws IOException if underlying Hadoop call throws LoginException
+     */
+    public static String getUser() throws IOException {
+        try {
+            UserGroupInformation ugi = getUGI();
+            return ugi.getUserName();
+        } catch (LoginException le) {
+            throw new IOException(le);
+        }
+    }
+
+    public static TServerSocket getServerSocket(String hiveHost, int portNum) throws TTransportException {
+        InetSocketAddress serverAddress;
+        if (hiveHost == null || hiveHost.isEmpty()) {
+            // Wildcard bind
+            serverAddress = new InetSocketAddress(portNum);
+        } else {
+            serverAddress = new InetSocketAddress(hiveHost, portNum);
+        }
+        return new TServerSocket(serverAddress);
+    }
+
+    public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath,
+                                                   String keyStorePassWord, List<String> sslVersionBlacklist) throws TTransportException,
+            UnknownHostException {
+        TSSLTransportFactory.TSSLTransportParameters params =
+                new TSSLTransportFactory.TSSLTransportParameters();
+        params.setKeyStore(keyStorePath, keyStorePassWord);
+        InetSocketAddress serverAddress;
+        if (hiveHost == null || hiveHost.isEmpty()) {
+            // Wildcard bind
+            serverAddress = new InetSocketAddress(portNum);
+        } else {
+            serverAddress = new InetSocketAddress(hiveHost, portNum);
+        }
+        TServerSocket thriftServerSocket =
+                TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params);
+        if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) {
+            List<String> sslVersionBlacklistLocal = new ArrayList<>();
+            for (String sslVersion : sslVersionBlacklist) {
+                sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase());
+            }
+            SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket();
+            List<String> enabledProtocols = new ArrayList<>();
+            for (String protocol : sslServerSocket.getEnabledProtocols()) {
+                if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) {
+                    LOG.debug("Disabling SSL Protocol: " + protocol);
+                } else {
+                    enabledProtocols.add(protocol);
+                }
+            }
+            sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
+            LOG.info("SSL Server Socket Enabled Protocols: "
+                    + Arrays.toString(sslServerSocket.getEnabledProtocols()));
+        }
+        return thriftServerSocket;
+    }
+
+    public static TTransport getSSLSocket(String host, int port, int loginTimeout,
+                                          String trustStorePath, String trustStorePassWord) throws TTransportException {
+        TSSLTransportFactory.TSSLTransportParameters params =
+                new TSSLTransportFactory.TSSLTransportParameters();
+        params.setTrustStore(trustStorePath, trustStorePassWord);
+        params.requireClientAuth(true);
+        // The underlying SSLSocket object is bound to host:port with the given SO_TIMEOUT and
+        // SSLContext created with the given params
+        TSocket tSSLSocket = TSSLTransportFactory.getClientSocket(host, port, loginTimeout, params);
+        return getSSLSocketWithHttps(tSSLSocket);
+    }
+
+    // Using endpoint identification algorithm as HTTPS enables us to do
+    // CNAMEs/subjectAltName verification
+    private static TSocket getSSLSocketWithHttps(TSocket tSSLSocket) throws TTransportException {
+        SSLSocket sslSocket = (SSLSocket) tSSLSocket.getSocket();
+        SSLParameters sslParams = sslSocket.getSSLParameters();
+        sslParams.setEndpointIdentificationAlgorithm("HTTPS");
+        sslSocket.setSSLParameters(sslParams);
+        return new TSocket(sslSocket);
+    }
+}
\ No newline at end of file
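
Example usage (not part of the patch): round-tripping a delegation token with
the helpers above. The token signature/service value is a placeholder; the real
value depends on how the client stored the token:

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // placeholder signature string
    String tokenStr = SecurityUtils.getTokenStrForm("hiveDelegationTokenSignature");
    if (tokenStr != null) {
      // re-attach the token under the service name a downstream client will look up
      SecurityUtils.setTokenStr(ugi, tokenStr, "hiveDelegationTokenSignature");
    }
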
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 2babda20627..ec3b2376dd7 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -123,6 +123,11 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
     return objectStore.openTransaction();
   }
 
+  @Override
+  public boolean openTransaction(String isolationLevel) {
+    return objectStore.openTransaction(isolationLevel);
+  }
+
   @Override
   public void rollbackTransaction() {
     objectStore.rollbackTransaction();
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 52391bea6b3..a0f071a042b 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -103,6 +103,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
     return false;
   }
 
+
+  @Override
+  public boolean openTransaction(String isolationLevel) {
+    return false;
+  }
+
   @Override
   public boolean commitTransaction() {
 
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java b/metastore/src/test/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java
new file mode 100644
index 00000000000..e57c6a1f670
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.DefaultPartitionExpressionProxy;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.minihms.MiniHMS;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Factory for creating specific
+ * {@link org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService} implementations for
+ * tests.
+ */
+public class MetaStoreFactoryForTests {
+  private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+
+  /**
+   * We would like to run the tests with 2 MetaStore configurations
+   * - Embedded - Where the MetaStore is running in the same thread, and does not use Thrift
+   * - Remote - Where the MetaStore is started in a different thread, and uses Thrift for
+   * communication
+   *
+   * Additionally, if the test.hms.client.configs system property is set, a Cluster test
+   * MetaStoreService which uses these configs is returned as well. In this case the MetaStore
+   * should be created manually or by an external application.
+   * @return The list of the test MetaStoreService implementations, usable as
+   *         {@code @Parameterized.Parameters}
+   */
+  public static List<Object[]> getMetaStores() throws Exception {
+    List<Object[]> metaStores = new ArrayList<Object[]>();
+
+    HiveConf conf = new HiveConf();
+    // set some values to use for getting conf. vars
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.METASTORE_METRICS, true);
+    HiveConf.setIntVar(conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX, 2);
+    HiveConf.setIntVar(conf, HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST,
+        DEFAULT_LIMIT_PARTITION_REQUEST);
+    HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS,
+            DefaultPartitionExpressionProxy.class.getName());
+
+    // Do this only at your own peril, and never in production code
+    conf.set("datanucleus.autoCreateTables", "false");
+
+    // Example for using cluster configuration xml-s
+    // -Dtest.hms.client.configs=/tmp/conf/core-site.xml,/tmp/conf/hive-site.xml
+    String testHMSClientConfiguration = System.getProperty("test.hms.client.configs");
+    if (testHMSClientConfiguration != null) {
+      HiveConf clusterConf = new HiveConf(conf);
+      // Loading the extra configuration options
+      String[] configurationFiles = testHMSClientConfiguration.split(",");
+      for(String configurationFile : configurationFiles) {
+        clusterConf.addResource(new Path(configurationFile));
+      }
+
+      // Using MetaStore running in an existing cluster
+      AbstractMetaStoreService cluster =
+          new MiniHMS.Builder()
+              .setConf(clusterConf)
+              .setType(MiniHMS.MiniHMSType.CLUSTER)
+              .build();
+      metaStores.add(new Object[]{"Cluster", cluster});
+    }
+
+    // Create Embedded MetaStore
+    conf.set("javax.jdo.option.ConnectionURL",
+        "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db1;create=true");
+    AbstractMetaStoreService embedded =
+        new MiniHMS.Builder()
+            .setConf(conf)
+            .setType(MiniHMS.MiniHMSType.EMBEDDED)
+            .build();
+    metaStores.add(new Object[] { "Embedded", embedded});
+
+    // Create Remote MetaStore
+    conf.set("javax.jdo.option.ConnectionURL",
+        "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db2;create=true");
+    AbstractMetaStoreService remote =
+        new MiniHMS.Builder()
+            .setConf(conf)
+            .setType(MiniHMS.MiniHMSType.REMOTE)
+            .build();
+    metaStores.add(new Object[] { "Remote", remote});
+
+    return metaStores;
+  }
+}
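
Example usage (not part of the patch): the factory is designed to feed
@Parameterized tests such as TestDatabases below, but a minimal standalone
driver would look like this (exception handling elided):

    for (Object[] entry : MetaStoreFactoryForTests.getMetaStores()) {
      AbstractMetaStoreService service = (AbstractMetaStoreService) entry[1];
      service.start();
      try {
        IMetaStoreClient client = service.getClient();
        System.out.println(entry[0] + ": " + client.getAllDatabases());
        client.close();
      } finally {
        service.stop();
      }
    }
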
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestDatabases.java b/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestDatabases.java
new file mode 100644
index 00000000000..2b41881f534
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestDatabases.java
@@ -0,0 +1,622 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransportException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+@RunWith(Parameterized.class)
+public class TestDatabases {
+  private static final Logger LOG = LoggerFactory.getLogger(TestDatabases.class);
+  // Needed until there is a junit release with @BeforeParam, @AfterParam (junit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Then we should remove our own copy
+  private static Set<AbstractMetaStoreService> metaStoreServices = null;
+  private static final String DEFAULT_DATABASE = "default";
+  private final AbstractMetaStoreService metaStore;
+  private IMetaStoreClient client;
+  private Database[] testDatabases = new Database[4];
+
+  @Parameterized.Parameters(name = "{0}")
+  public static List<Object[]> getMetaStoreToTest() throws Exception {
+    List<Object[]> result = MetaStoreFactoryForTests.getMetaStores();
+    metaStoreServices = new HashSet<>(result.size());
+    for(Object[] test: result) {
+      metaStoreServices.add((AbstractMetaStoreService) test[1]);
+    }
+    return result;
+  }
+
+  public TestDatabases(String name, AbstractMetaStoreService metaStore) throws Exception {
+    this.metaStore = metaStore;
+    this.metaStore.start();
+  }
+
+  // Needed until there is a junit release with @BeforeParam, @AfterParam (junit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Then we should move this to @AfterParam
+  @AfterClass
+  public static void stopMetaStores() throws Exception {
+    for(AbstractMetaStoreService metaStoreService : metaStoreServices) {
+      try {
+        metaStoreService.stop();
+      } catch(Exception e) {
+        // Catch the exceptions, so every other metastore could be stopped as well
+        // Log it, so at least there is a slight possibility we find out about this :)
+        LOG.error("Error stopping MetaStoreService", e);
+      }
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Get new client
+    client = metaStore.getClient();
+
+    // Clean up the databases
+    for(String databaseName : client.getAllDatabases()) {
+      if (!databaseName.equals(DEFAULT_DATABASE)) {
+        client.dropDatabase(databaseName, true, true, true);
+      }
+    }
+
+    testDatabases[0] =
+        new DatabaseBuilder().setName("test_database_1").build();
+    testDatabases[1] =
+        new DatabaseBuilder().setName("test_database_to_find_1").build();
+    testDatabases[2] =
+        new DatabaseBuilder().setName("test_database_to_find_2").build();
+    testDatabases[3] =
+        new DatabaseBuilder().setName("test_database_hidden_1").build();
+
+    // Create the databases, and reload them from the MetaStore
+    for(int i=0; i < testDatabases.length; i++) {
+      client.createDatabase(testDatabases[i]);
+      testDatabases[i] = client.getDatabase(testDatabases[i].getName());
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      if (client != null) {
+        client.close();
+      }
+    } finally {
+      client = null;
+    }
+  }
+
+  /**
+   * This test creates and queries a database and then drops it. Good for testing the happy path.
+   * @throws Exception
+   */
+  @Test
+  public void testCreateGetDeleteDatabase() throws Exception {
+    Database database = getDatabaseWithAllParametersSet();
+    client.createDatabase(database);
+    Database createdDatabase = client.getDatabase(database.getName());
+
+    // The createTime will be set on the server side, so the comparison should skip it
+    Assert.assertEquals("Comparing databases", database, createdDatabase);
+    Assert.assertTrue("The directory should be created", metaStore.isPathExists(
+        new Path(database.getLocationUri())));
+    client.dropDatabase(database.getName());
+    Assert.assertFalse("The directory should be removed",
+        metaStore.isPathExists(new Path(database.getLocationUri())));
+    try {
+      client.getDatabase(database.getName());
+      Assert.fail("Expected a NoSuchObjectException to be thrown");
+    } catch (NoSuchObjectException exception) {
+      // Expected exception
+    }
+  }
+
+  @Test
+  public void testCreateDatabaseDefaultValues() throws Exception {
+    Database database = new Database();
+    database.setName("dummy");
+
+    client.createDatabase(database);
+    Database createdDatabase = client.getDatabase(database.getName());
+
+    Assert.assertNull("Comparing description", createdDatabase.getDescription());
+    Assert.assertEquals("Comparing location", metaStore.getWarehouseRoot() + "/" +
+                                                  createdDatabase.getName() + ".db", createdDatabase.getLocationUri());
+    Assert.assertEquals("Comparing parameters", new HashMap<String, String>(),
+        createdDatabase.getParameters());
+    Assert.assertNull("Comparing privileges", createdDatabase.getPrivileges());
+    Assert.assertNull("Comparing owner name", createdDatabase.getOwnerName());
+    Assert.assertEquals("Comparing owner type", PrincipalType.USER, createdDatabase.getOwnerType());
+  }
+
+  @Test(expected = MetaException.class)
+  public void testCreateDatabaseNullName() throws Exception {
+    Database database = testDatabases[0];
+
+    // Null out the required name field
+    database.setName(null);
+
+    client.createDatabase(database);
+    // Throwing InvalidObjectException would be more appropriate, but we do not change the API
+  }
+
+  @Test(expected = InvalidObjectException.class)
+  public void testCreateDatabaseInvalidName() throws Exception {
+    Database database = testDatabases[0];
+
+    // Invalid character in new database name
+    database.setName("test_database_1;");
+    client.createDatabase(database);
+  }
+
+  @Test(expected = InvalidObjectException.class)
+  public void testCreateDatabaseEmptyName() throws Exception {
+    Database database = testDatabases[0];
+
+    // Empty new database name
+    database.setName("");
+    client.createDatabase(database);
+    // Throwing InvalidObjectException would be more appropriate, but we do not change the API
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void testCreateDatabaseAlreadyExists() throws Exception {
+    Database database = testDatabases[0];
+
+    // Already existing database
+    client.createDatabase(database);
+  }
+
+  @Test
+  public void testDefaultDatabaseData() throws Exception {
+    Database database = client.getDatabase(DEFAULT_DATABASE);
+    Assert.assertEquals("Default database name", "default", database.getName());
+    Assert.assertEquals("Default database description", "Default Hive database",
+        database.getDescription());
+    Assert.assertEquals("Default database location", metaStore.getWarehouseRoot(),
+        new Path(database.getLocationUri()));
+    Assert.assertEquals("Default database parameters", new HashMap<String, String>(),
+        database.getParameters());
+    Assert.assertEquals("Default database owner", "public", database.getOwnerName());
+    Assert.assertEquals("Default database owner type", PrincipalType.ROLE, database.getOwnerType());
+    Assert.assertNull("Default database privileges", database.getPrivileges());
+  }
+
+  @Test
+  public void testGetDatabaseCaseInsensitive() throws Exception {
+    Database database = testDatabases[0];
+
+    // Test in upper case
+    Database resultUpper = client.getDatabase(database.getName().toUpperCase());
+    Assert.assertEquals("Comparing databases", database, resultUpper);
+
+    // Test in mixed case
+    Database resultMix = client.getDatabase("teST_dAtABase_1");
+    Assert.assertEquals("Comparing databases", database, resultMix);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void testGetDatabaseNoSuchDatabase() throws Exception {
+    client.getDatabase("no_such_database");
+  }
+
+  @Test
+  public void testGetDatabaseNullName() throws Exception {
+    // Missing database name in the query
+    try {
+      client.getDatabase(null);
+      // TODO: Should have a check on the server side.
+      Assert.fail("Expected a NullPointerException or TTransportException to be thrown");
+    } catch (NullPointerException exception) {
+      // Expected exception - Embedded MetaStore
+    } catch (TTransportException exception) {
+      // Expected exception - Remote MetaStore
+    }
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void testDropDatabaseNoSuchDatabase() throws Exception {
+    client.dropDatabase("no_such_database");
+  }
+
+  @Test
+  public void testDropDatabaseNullName() throws Exception {
+    // Missing database in the query
+    try {
+      client.dropDatabase(null);
+      // TODO: Should be checked on server side
+      Assert.fail("Expected an NullPointerException or TTransportException to be thrown");
+    } catch (NullPointerException exception) {
+      // Expected exception - Embedded MetaStore
+    } catch (TTransportException exception) {
+      // Expected exception - Remote MetaStore
+    }
+  }
+
+  @Test
+  public void testDropDatabaseDefaultDatabase() throws Exception {
+    // Check if it is possible to drop default database
+    try {
+      client.dropDatabase(DEFAULT_DATABASE);
+      // TODO: Should be checked on server side
+      Assert.fail("Expected an MetaException or TTransportException to be thrown");
+    } catch (MetaException exception) {
+      // Expected exception - Embedded MetaStore
+    } catch (TTransportException exception) {
+      // Expected exception - Remote MetaStore
+    }
+  }
+
+  @Test
+  public void testDropDatabaseCaseInsensitive() throws Exception {
+    Database database = testDatabases[0];
+
+    // Test in upper case
+    client.dropDatabase(database.getName().toUpperCase());
+    List<String> allDatabases = client.getAllDatabases();
+    Assert.assertEquals("All databases size", 4, allDatabases.size());
+
+    // Test in mixed case
+    client.createDatabase(database);
+    client.dropDatabase("TesT_DatABaSe_1");
+    allDatabases = client.getAllDatabases();
+    Assert.assertEquals("All databases size", 4, allDatabases.size());
+  }
+
+  @Test
+  public void testDropDatabaseDeleteData() throws Exception {
+    Database database = testDatabases[0];
+    Path dataFile = new Path(database.getLocationUri() + "/dataFile");
+    metaStore.createFile(dataFile, "100");
+
+    // Do not delete the data
+    client.dropDatabase(database.getName(), false, false);
+    // Check that the data still exist
+    Assert.assertTrue("The data file should still exist", metaStore.isPathExists(dataFile));
+
+    // Recreate the database
+    client.createDatabase(database);
+    Assert.assertTrue("The data file should still exist", metaStore.isPathExists(dataFile));
+
+    // Delete the data
+    client.dropDatabase(database.getName(), true, false);
+    // Check that the data is removed
+    Assert.assertFalse("The data file should not exist", metaStore.isPathExists(dataFile));
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void testDropDatabaseIgnoreUnknownFalse() throws Exception {
+    // No such database
+    client.dropDatabase("no_such_database", false, false);
+  }
+
+  @Test
+  public void testDropDatabaseIgnoreUnknownTrue() throws Exception {
+    // No such database
+    client.dropDatabase("no_such_database", false, true);
+  }
+
+  @Test(expected = InvalidOperationException.class)
+  public void testDropDatabaseWithTable() throws Exception {
+    Database database = testDatabases[0];
+    Table testTable =
+        new TableBuilder()
+            .setDbName(database.getName())
+            .setTableName("test_table")
+            .addCol("test_col", "int")
+            .build();
+    client.createTable(testTable);
+
+    client.dropDatabase(database.getName(), true, true, false);
+  }
+
+  @Test
+  public void testDropDatabaseWithTableCascade() throws Exception {
+    Database database = testDatabases[0];
+    Table testTable =
+        new TableBuilder()
+            .setDbName(database.getName())
+            .setTableName("test_table")
+            .addCol("test_col", "int")
+            .build();
+    client.createTable(testTable);
+
+    client.dropDatabase(database.getName(), true, true, true);
+    Assert.assertFalse("The directory should be removed",
+        metaStore.isPathExists(new Path(database.getLocationUri())));
+  }
+
+  @Test(expected = InvalidOperationException.class)
+  public void testDropDatabaseWithFunction() throws Exception {
+    Database database = testDatabases[0];
+
+    Function testFunction =
+        new FunctionBuilder()
+            .setDbName(database.getName())
+            .setName("test_function")
+            .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper")
+            .build();
+
+    client.createFunction(testFunction);
+
+    client.dropDatabase(database.getName(), true, true, false);
+  }
+
+  @Test
+  public void testDropDatabaseWithFunctionCascade() throws Exception {
+    Database database = testDatabases[0];
+
+    Function testFunction =
+        new FunctionBuilder()
+            .setDbName(database.getName())
+            .setName("test_function")
+            .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper")
+            .build();
+
+    client.createFunction(testFunction);
+
+    client.dropDatabase(database.getName(), true, true, true);
+    Assert.assertFalse("The directory should be removed",
+        metaStore.isPathExists(new Path(database.getLocationUri())));
+  }
+
+  /**
+   * Creates an index in the given database for testing purposes
+   * @param databaseName The name of the database in which the index should be created
+   * @throws TException If there is an error during the index creation
+   */
+  private void createIndex(String databaseName) throws TException {
+    Table testTable =
+        new TableBuilder()
+            .setDbName(databaseName)
+            .setTableName("test_table")
+            .addCol("test_col", "int")
+            .build();
+
+    Index testIndex =
+        new IndexBuilder()
+            .setIndexName("test_index")
+            .setIndexTableName("test_index_table")
+            .setDbAndTableName(testTable)
+            .addCol("test_col", "int")
+            .build();
+    Table testIndexTable =
+        new TableBuilder()
+            .setDbName(databaseName)
+            .setTableName("test_index_table")
+            .addCol("test_col", "int")
+            .build();
+
+    // Drop database with index
+    client.createTable(testTable);
+    client.createIndex(testIndex, testIndexTable);
+  }
+
+  @Test
+  public void testDropDatabaseWithIndex() throws Exception {
+    Database database = testDatabases[0];
+    createIndex(database.getName());
+
+    // TODO: Known error, should be fixed
+    // client.dropDatabase(database.getName(), true, true, true);
+    // Need to drop index to clean up the mess
+    try {
+      // Without cascade
+      client.dropDatabase(database.getName(), true, true, false);
+      Assert.fail("Expected an InvalidOperationException to be thrown");
+    } catch (InvalidOperationException exception) {
+      // Expected exception
+    }
+    client.dropIndex(database.getName(), "test_table", "test_index", true);
+    // TODO: End index hack
+  }
+
+  @Test
+  public void testDropDatabaseWithIndexCascade() throws Exception {
+    Database database = testDatabases[0];
+    createIndex(database.getName());
+
+    // With cascade
+    // TODO: Known error, should be fixed
+    // client.dropDatabase(database.getName(), true, true, true);
+    // Need to drop index to clean up the mess
+    client.dropIndex(database.getName(), "test_table", "test_index", true);
+    client.dropDatabase(database.getName(), true, true, true);
+    Assert.assertFalse("The directory should be removed",
+        metaStore.isPathExists(new Path(database.getLocationUri())));
+  }
+
+  @Test
+  public void testGetAllDatabases() throws Exception {
+    List<String> allDatabases = client.getAllDatabases();
+    Assert.assertEquals("All databases size", 5, allDatabases.size());
+    for(Database database : testDatabases) {
+      Assert.assertTrue("Checking database names", allDatabases.contains(database.getName()));
+    }
+    Assert.assertTrue("Checnking that default database is returned",
+        allDatabases.contains(DEFAULT_DATABASE));
+
+    // Drop one database, see what remains
+    client.dropDatabase(testDatabases[1].getName());
+    allDatabases = client.getAllDatabases();
+    Assert.assertEquals("All databases size", 4, allDatabases.size());
+    for(Database database : testDatabases) {
+      if (!database.getName().equals(testDatabases[1].getName())) {
+        Assert.assertTrue("Checking database names", allDatabases.contains(database.getName()));
+      }
+    }
+    Assert.assertTrue("Checnking that default database is returned",
+        allDatabases.contains(DEFAULT_DATABASE));
+    Assert.assertFalse("Checking that the deleted database is not returned",
+        allDatabases.contains(testDatabases[1].getName()));
+  }
+
+  @Test
+  public void testGetDatabases() throws Exception {
+    // Find databases which name contains _to_find_
+    List<String> databases = client.getDatabases("*_to_find_*");
+    Assert.assertEquals("Found databases size", 2, databases.size());
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_1"));
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2"));
+
+    // Find databases which name contains _to_find_ or _hidden_
+    databases = client.getDatabases("*_to_find_*|*_hidden_*");
+    Assert.assertEquals("Found databases size", 3, databases.size());
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_1"));
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2"));
+    Assert.assertTrue("Should contain", databases.contains("test_database_hidden_1"));
+
+    // Look for databases but do not find any
+    databases = client.getDatabases("*_not_such_database_*");
+    Assert.assertEquals("No such databases size", 0, databases.size());
+
+    // Look for databases without pattern
+    databases = client.getDatabases(null);
+    Assert.assertEquals("Search databases without pattern size", 5, databases.size());
+  }
+
+  @Test
+  public void testGetDatabasesCaseInsensitive() throws Exception {
+    // Check case insensitive search
+    List<String> databases = client.getDatabases("*_tO_FiND*");
+    Assert.assertEquals("Found databases size", 2, databases.size());
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_1"));
+    Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2"));
+  }
+
+//  HIVE-8472, which would fix this, is missing on branch-2.3
+//  @Test
+//  public void testAlterDatabase() throws Exception {
+//    Database originalDatabase = testDatabases[0];
+//    Database newDatabase =
+//        new DatabaseBuilder()
+//            // The database name is not changed during alter
+//            .setName(originalDatabase.getName())
+//            .setOwnerType(PrincipalType.GROUP)
+//            .setOwnerName("owner2")
+//            .setLocation(metaStore.getWarehouseRoot() + "/database_location_2")
+//            .setDescription("dummy description 2")
+//            .addParam("param_key_1", "param_value_1_2")
+//            .addParam("param_key_2_3", "param_value_2_3")
+//            .build();
+//
+//    client.alterDatabase(originalDatabase.getName(), newDatabase);
+//    Database alteredDatabase = client.getDatabase(newDatabase.getName());
+//    Assert.assertEquals("Comparing Databases", newDatabase, alteredDatabase);
+//  }
+
+  @Test
+  public void testAlterDatabaseNotNullableFields() throws Exception {
+    Database database = getDatabaseWithAllParametersSet();
+    client.createDatabase(database);
+    Database originalDatabase = client.getDatabase(database.getName());
+    Database newDatabase = new Database();
+    newDatabase.setName("new_name");
+
+    client.alterDatabase(originalDatabase.getName(), newDatabase);
+    // The name should not be changed, so reload the db with the original name
+    Database alteredDatabase = client.getDatabase(originalDatabase.getName());
+    Assert.assertEquals("Database name should not change", originalDatabase.getName(),
+        alteredDatabase.getName());
+    Assert.assertEquals("Database description should not change", originalDatabase.getDescription(),
+        alteredDatabase.getDescription());
+    Assert.assertEquals("Database location should not change", originalDatabase.getLocationUri(),
+        alteredDatabase.getLocationUri());
+    Assert.assertEquals("Database parameters should be empty", new HashMap<String, String>(),
+        alteredDatabase.getParameters());
+    Assert.assertNull("Database owner should be empty", alteredDatabase.getOwnerName());
+    Assert.assertEquals("Database owner type should not change", originalDatabase.getOwnerType(),
+        alteredDatabase.getOwnerType());
+    Assert.assertNull("Database privileges should be empty", alteredDatabase.getPrivileges());
+  }
+
+//  HIVE-8472, which would fix this, is missing on branch-2.3
+//  @Test(expected = NoSuchObjectException.class)
+//  public void testAlterDatabaseNoSuchDatabase() throws Exception {
+//    Database newDatabase = new DatabaseBuilder().setName("test_database_altered").build();
+//
+//    client.alterDatabase("no_such_database", newDatabase);
+//  }
+
+//  HIVE-8472, which would fix this, is missing on branch-2.3
+//  @Test
+//  public void testAlterDatabaseCaseInsensitive() throws Exception {
+//    Database originalDatabase = testDatabases[0];
+//    Database newDatabase = originalDatabase.deepCopy();
+//    newDatabase.setDescription("Altered database");
+//
+//    // Test in upper case
+//    client.alterDatabase(originalDatabase.getName().toUpperCase(), newDatabase);
+//    Database alteredDatabase = client.getDatabase(newDatabase.getName());
+//    Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase);
+//
+//    // Test in mixed case
+//    originalDatabase = testDatabases[2];
+//    newDatabase = originalDatabase.deepCopy();
+//    newDatabase.setDescription("Altered database 2");
+//    client.alterDatabase("TeST_daTAbaSe_TO_FiNd_2", newDatabase);
+//    alteredDatabase = client.getDatabase(newDatabase.getName());
+//    Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase);
+//  }
+
+  private Database getDatabaseWithAllParametersSet() throws Exception {
+    return new DatabaseBuilder()
+               .setName("dummy")
+               .setOwnerType(PrincipalType.ROLE)
+               .setOwnerName("owner")
+               .setLocation(metaStore.getWarehouseRoot() + "/database_location")
+               .setDescription("dummy description")
+               .addParam("param_key_1", "param_value_1")
+               .addParam("param_key_2", "param_value_2")
+               .build();
+  }
+}
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
new file mode 100644
index 00000000000..978df5e4142
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test class for the IMetaStoreClient API. Tests the Table-related functions for metadata
+ * manipulation, such as creating, dropping and altering tables.
+ */
+@RunWith(Parameterized.class)
+public class TestTablesCreateDropAlterTruncate {
+  private static final Logger LOG = LoggerFactory.getLogger(TestTablesCreateDropAlterTruncate.class);
+  // Needed until there is a JUnit release with @BeforeParam and @AfterParam (JUnit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Once that is available we should remove our own copy
+  private static Set<AbstractMetaStoreService> metaStoreServices = null;
+  private static final String DEFAULT_DATABASE = "default";
+  private final AbstractMetaStoreService metaStore;
+  private IMetaStoreClient client;
+  private Table[] testTables = new Table[6];
+
+  @Parameterized.Parameters(name = "{0}")
+  public static List<Object[]> getMetaStoreToTest() throws Exception {
+    List<Object[]> result = MetaStoreFactoryForTests.getMetaStores();
+    metaStoreServices = new HashSet<>(result.size());
+    for(Object[] test: result) {
+      metaStoreServices.add((AbstractMetaStoreService) test[1]);
+    }
+    return result;
+  }
+
+  public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService metaStore) throws Exception {
+    this.metaStore = metaStore;
+    this.metaStore.start();
+  }
+
+  // Needed until there is a JUnit release with @BeforeParam and @AfterParam (JUnit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Once that is available we should move this to @AfterParam
+  @AfterClass
+  public static void stopMetaStores() throws Exception {
+    for (AbstractMetaStoreService metaStoreService : metaStoreServices) {
+      try {
+        metaStoreService.stop();
+      } catch (Exception e) {
+        // Catch the exception so every other metastore can be stopped as well.
+        // Log it, so there is at least a slight chance we find out about this :)
+        LOG.error("Error stopping MetaStoreService", e);
+      }
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Get new client
+    client = metaStore.getClient();
+
+    // Drop every table in the default database
+    for (String tableName : client.getAllTables(DEFAULT_DATABASE)) {
+      client.dropTable(DEFAULT_DATABASE, tableName, true, true, true);
+    }
+
+    // Clean up trash
+    metaStore.cleanWarehouseDirs();
+
+    testTables[0] =
+            new TableBuilder()
+                    .setDbName(DEFAULT_DATABASE)
+                    .setTableName("test_table")
+                    .addCol("test_col", "int")
+                    .build();
+
+    client.createTable(testTables[0]);
+    testTables[0] = client.getTable(testTables[0].getDbName(), testTables[0].getTableName());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      if (client != null) {
+        try {
+          client.close();
+        } catch (Exception e) {
+          // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+        }
+      }
+    } finally {
+      client = null;
+    }
+  }
+
+  @Test
+  public void testAlterTableExpectedPropertyMatch() throws Exception {
+    Table originalTable = testTables[0];
+
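+    // Require that transient_lastDdlTime still holds the value we just read; the alter should
+    // then be applied.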
+    EnvironmentContext context = new EnvironmentContext();
+    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_KEY, "transient_lastDdlTime");
+    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE,
+            originalTable.getParameters().get("transient_lastDdlTime"));
+
+    client.alter_table_with_environmentContext(originalTable.getDbName(), originalTable.getTableName(),
+            originalTable, context);
+  }
+
+  @Test(expected = MetaException.class)
+  public void testAlterTableExpectedPropertyDifferent() throws Exception {
+    Table originalTable = testTables[0];
+
+    EnvironmentContext context = new EnvironmentContext();
+    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_KEY, "transient_lastDdlTime");
+    context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE, "alma");
+
+    client.alter_table_with_environmentContext(originalTable.getDbName(), originalTable.getTableName(),
+            originalTable, context);
+  }
+
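+  // A minimal sketch (not part of the original patch) of how a caller could use the expected
+  // parameter check as an optimistic lock: re-read the table and retry when the commit fails.
+  // The helper name and the retry bound are illustrative assumptions.
+  private void alterWithRetry(IMetaStoreClient hmsClient, String dbName, String tableName,
+      String key, String newValue, int maxRetries) throws Exception {
+    for (int attempt = 0; attempt < maxRetries; attempt++) {
+      Table current = hmsClient.getTable(dbName, tableName);
+      EnvironmentContext context = new EnvironmentContext();
+      context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_KEY, key);
+      context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE,
+          current.getParameters().get(key));
+      Table newTable = current.deepCopy();
+      newTable.getParameters().put(key, newValue);
+      try {
+        hmsClient.alter_table_with_environmentContext(dbName, tableName, newTable, context);
+        return; // the expected value matched, so the alter was applied atomically
+      } catch (MetaException e) {
+        // another writer changed the parameter first; loop and retry with the fresh value
+      }
+    }
+    throw new MetaException("Could not alter " + dbName + "." + tableName + " after "
+        + maxRetries + " attempts");
+  }
+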
+  /**
+   * This test ensures that when concurrent Iceberg commits race on the same expected parameter
+   * value, only one of them succeeds. Acceptable as a first sanity check.
+   * <p>
+   * I have not found a good way to check that the HMS-side database commits run in parallel in the
+   * {@link org.apache.hadoop.hive.metastore.HiveAlterHandler#alterTable(RawStore, Warehouse, String, String, Table, EnvironmentContext)}
+   * call, but this test can be used to verify that manually with breakpoints.
+   */
+  @Test
+  public void testAlterTableExpectedPropertyConcurrent() throws Exception {
+    Table originalTable = testTables[0];
+
+    originalTable.getParameters().put("snapshot", "0");
+    client.alter_table_with_environmentContext(originalTable.getDbName(), originalTable.getTableName(),
+            originalTable, null);
+
+    ExecutorService threads = null;
+    try {
+      threads = Executors.newFixedThreadPool(2);
+      for (int i = 0; i < 3; i++) {
+        EnvironmentContext context = new EnvironmentContext();
+        context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_KEY, "snapshot");
+        context.putToProperties(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE, String.valueOf(i));
+
+        Table newTable = originalTable.deepCopy();
+        newTable.getParameters().put("snapshot", String.valueOf(i + 1));
+
+        IMetaStoreClient client1 = metaStore.getClient();
+        IMetaStoreClient client2 = metaStore.getClient();
+
+        Collection<Callable<Boolean>> concurrentTasks = new ArrayList<>(2);
+        concurrentTasks.add(alterTask(client1, newTable, context));
+        concurrentTasks.add(alterTask(client2, newTable, context));
+
+        Collection<Future<Boolean>> results = threads.invokeAll(concurrentTasks);
+
+        boolean foundSuccess = false;
+        boolean foundFailure = false;
+
+        for (Future<Boolean> result : results) {
+          if (result.get()) {
+            foundSuccess = true;
+          } else {
+            foundFailure = true;
+          }
+        }
+
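+        // With two racing alters per iteration, exactly one should see the expected value and win.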
+        assertTrue("At least one success is expected", foundSuccess);
+        assertTrue("At least one failure is expected", foundFailure);
+      }
+    } finally {
+      if (threads != null) {
+        threads.shutdown();
+      }
+    }
+  }
+
+  private Callable<Boolean> alterTask(final IMetaStoreClient hmsClient, final Table newTable, final EnvironmentContext context) {
+    return new Callable<Boolean>() {
+      @Override
+      public Boolean call() throws Exception {
+        try {
+          hmsClient.alter_table_with_environmentContext(newTable.getDbName(), newTable.getTableName(),
+                  newTable, context);
+        } catch (Throwable e) {
+          return false;
+        }
+        return true;
+      }
+    };
+  }
+}
\ No newline at end of file
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
new file mode 100644
index 00000000000..9b4fd704e96
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.TrashPolicy;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Tests should use this abstract class to access the MetaStore services. It ensures that the
+ * same tests can be run against different MetaStore configurations.
+ */
+public abstract class AbstractMetaStoreService {
+  protected HiveConf configuration;
+  private Warehouse warehouse;
+  private FileSystem warehouseRootFs;
+  private Path trashDir;
+
+  public AbstractMetaStoreService(HiveConf configuration) {
+    this.configuration = new HiveConf(configuration);
+  }
+
+  /**
+   * Starts the MetaStoreService. Be aware that, since the current MetaStore does not implement
+   * clean shutdown, the MetaStoreService can be started only once per test.
+   *
+   * @throws Exception if any Exception occurs
+   */
+  public void start() throws Exception {
+    warehouse = new Warehouse(configuration);
+    warehouseRootFs = warehouse.getFs(warehouse.getWhRoot());
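+    // Resolve the current trash directory up front, so the tests can later verify whether
+    // dropped data was moved to the trash instead of being deleted permanently.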
+    TrashPolicy trashPolicy = TrashPolicy.getInstance(configuration, warehouseRootFs, warehouse.getWhRoot());
+    trashDir = trashPolicy.getCurrentTrashDir();
+  }
+
+  /**
+   * Starts the service, adding extra configuration on top of the defaults. Be aware that, since
+   * the current MetaStore does not implement clean shutdown, the MetaStoreService can be started
+   * only once per test.
+   *
+   * @param confOverlay The extra parameters which should be set before starting the service
+   * @throws Exception if any Exception occurs
+   */
+  public void start(Map<HiveConf.ConfVars, String> confOverlay) throws Exception {
+    // Set confOverlay parameters
+    for (Map.Entry<HiveConf.ConfVars, String> entry : confOverlay.entrySet()) {
+      HiveConf.setVar(configuration, entry.getKey(), entry.getValue());
+    }
+    // Start the service
+    start();
+  }
+
+  /**
+   * Returns the MetaStoreClient for this MetaStoreService.
+   *
+   * @return The client connected to this service
+   * @throws MetaException if any Exception occurs during client configuration
+   */
+  public IMetaStoreClient getClient() throws MetaException {
+    return new HiveMetaStoreClient(configuration);
+  }
+
+  /**
+   * Returns the MetaStore Warehouse root directory name.
+   *
+   * @return The warehouse root directory
+   * @throws MetaException if the warehouse root cannot be determined
+   */
+  public Path getWarehouseRoot() throws MetaException {
+    return warehouse.getWhRoot();
+  }
+
+  /**
+   * Check if a path exists.
+   *
+   * @param path The path to check
+   * @return true if the path exists
+   * @throws IOException IO failure
+   */
+  public boolean isPathExists(Path path) throws IOException {
+    return warehouseRootFs.exists(path);
+  }
+
+  /**
+   * Check if a path exists in the trash directory.
+   *
+   * @param path The path to check
+   * @return True if the path exists
+   * @throws IOException IO failure
+   */
+  public boolean isPathExistsInTrash(Path path) throws IOException {
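+    // Hadoop's trash mirrors the original directory layout under the trash root, so the
+    // candidate location is the trash directory with the original path appended to it.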
+    Path pathInTrash = new Path(trashDir.toUri().getScheme(), trashDir.toUri().getAuthority(),
+        trashDir.toUri().getPath() + path.toUri().getPath());
+    return isPathExists(pathInTrash);
+  }
+
+  /**
+   * Creates a file on the given path.
+   *
+   * @param path Destination path
+   * @param content The content of the file
+   * @throws IOException IO failure
+   */
+  public void createFile(Path path, String content) throws IOException {
+    FSDataOutputStream outputStream = warehouseRootFs.create(path);
+    outputStream.write(content.getBytes());
+    outputStream.close();
+  }
+
+  /**
+   * Cleans the warehouse and the trash dirs in preparation for the tests.
+   *
+   * @throws MetaException if the directories cannot be deleted
+   */
+  public void cleanWarehouseDirs() throws MetaException {
+    warehouse.deleteDir(getWarehouseRoot(), true, true);
+    warehouse.deleteDir(trashDir, true, true);
+  }
+
+  /**
+   * Stops the MetaStoreService. Once MetaStore implements clean shutdown, this method will
+   * call shutdown on it. Currently this does nothing :(
+   */
+  public void stop() {
+  }
+}
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStoreForTests.java b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStoreForTests.java
new file mode 100644
index 00000000000..b4c859de5fb
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStoreForTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * The AbstractMetaStoreService implementation used when the tests run against a cluster
+ * created manually or by an external application (see the
+ * {@link org.apache.hadoop.hive.metastore.client.MetaStoreFactoryForTests} class).
+ */
+public class ClusterMetaStoreForTests extends AbstractMetaStoreService {
+  public ClusterMetaStoreForTests(HiveConf configuration) {
+    super(configuration);
+  }
+}
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStoreForTests.java b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStoreForTests.java
new file mode 100644
index 00000000000..7d2b34b8bea
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStoreForTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * The AbstractMetaStoreService implementation used when the tests run against an embedded
+ * MetaStore in the same thread as the client, without Thrift communication (see the
+ * {@link org.apache.hadoop.hive.metastore.client.MetaStoreFactoryForTests} class).
+ */
+public class EmbeddedMetaStoreForTests extends AbstractMetaStoreService {
+  public EmbeddedMetaStoreForTests(HiveConf configuration) {
+    super(configuration);
+  }
+}
\ No newline at end of file
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java
new file mode 100644
index 00000000000..1d2f5c3fd58
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * Mini HMS implementation, which can be used to run tests against different HMS configurations.
+ * Currently it supports 3 types:
+ *  - EMBEDDED - MetaStore running in embedded mode
+ *  - REMOTE - MetaStore running in the same process but in a dedicated thread and accessed
+ *  through the Thrift interface
+ *  - CLUSTER - In this case the MiniHMS is only a wrapper around the HMS running on a cluster,
+ *  so the same tests can be run against a real cluster
+ */
+public class MiniHMS {
+  public enum MiniHMSType {
+    EMBEDDED,
+    REMOTE,
+    CLUSTER
+  }
+
+  public static class Builder {
+    private HiveConf metaStoreConf = new HiveConf();
+    private MiniHMSType miniHMSType = MiniHMSType.EMBEDDED;
+
+    public Builder() {
+    }
+
+    public Builder setConf(HiveConf conf) {
+      this.metaStoreConf = new HiveConf(conf);
+      return this;
+    }
+
+    public Builder setType(MiniHMSType type) {
+      this.miniHMSType = type;
+      return this;
+    }
+
+    public AbstractMetaStoreService build() throws Exception {
+      switch (miniHMSType) {
+        case REMOTE:
+          return new RemoteMetaStoreForTests(metaStoreConf);
+        case EMBEDDED:
+          return new EmbeddedMetaStoreForTests(metaStoreConf);
+        case CLUSTER:
+          return new ClusterMetaStoreForTests(metaStoreConf);
+        default:
+          throw new IllegalArgumentException("Unexpected miniHMSType: " + miniHMSType);
+      }
+    }
+  }
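+
+  // A minimal usage sketch (illustrative only, not part of the original patch): tests obtain a
+  // metastore through the Builder; the method name here is a hypothetical example.
+  public static AbstractMetaStoreService exampleEmbeddedMetaStore() throws Exception {
+    return new MiniHMS.Builder()
+        .setConf(new HiveConf())
+        .setType(MiniHMSType.EMBEDDED)
+        .build();
+  }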
+}
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStoreForTests.java b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStoreForTests.java
new file mode 100644
index 00000000000..41a45b8ca3a
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStoreForTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+
+/**
+ * The AbstractMetaStoreService implementation used when the tests run against a MetaStore
+ * which is running in a dedicated thread and is accessed through the Thrift interface (see the
+ * {@link org.apache.hadoop.hive.metastore.client.MetaStoreFactoryForTests} class).
+ */
+public class RemoteMetaStoreForTests extends AbstractMetaStoreService {
+
+  public RemoteMetaStoreForTests(HiveConf configuration) {
+    super(configuration);
+  }
+
+  public void start() throws Exception {
+    HiveConf.setBoolVar(configuration, HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, false);
+    int port = MetaStoreUtils.startMetaStore(configuration);
+    HiveConf.setVar(configuration, HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
+    super.start();
+  }
+}
\ No newline at end of file