Posted to commits@hive.apache.org by ga...@apache.org on 2018/03/31 01:25:39 UTC

[20/44] hive git commit: HIVE-18755 Modifications to the metastore for catalogs (Alan Gates, reviewed by Thejas Nair)
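
The patch threads a catalog name through every metastore call while keeping
the old single-string Thrift signatures, so the catalog travels encoded inside
the dbname argument and is split back out by MetaStoreUtils.parseDbName. A
minimal sketch of that round trip follows; the '@' and '#' marker characters
are an assumption, since this part of the patch only shows the call sites:

    // Illustrative sketch only -- the real helpers are
    // MetaStoreUtils.prependCatalogToDbName and MetaStoreUtils.parseDbName;
    // the "@cat#db" encoding is a guess consistent with their call sites.
    public final class CatalogNameSketch {
      static final int CAT_NAME = 0; // array indexes mirroring MetaStoreUtils
      static final int DB_NAME = 1;

      // Encode a catalog into the legacy single-string dbname parameter.
      static String prependCatalogToDbName(String cat, String db) {
        if (cat == null) {
          return db;                                   // old-style plain dbname
        }
        return "@" + cat + (db == null ? "" : "#" + db);
      }

      // Split an encoded name back into {catalog, database}.
      static String[] parseDbName(String name, String defaultCatalog) {
        if (name == null || !name.startsWith("@")) {
          return new String[] {defaultCatalog, name};  // legacy caller
        }
        int sep = name.indexOf('#');
        return sep < 0
            ? new String[] {name.substring(1), null}   // catalog only
            : new String[] {name.substring(1, sep), name.substring(sep + 1)};
      }

      public static void main(String[] args) {
        String encoded = prependCatalogToDbName("spark", "web_logs");
        String[] parsed = parseDbName(encoded, "hive");
        System.out.println(parsed[CAT_NAME] + "/" + parsed[DB_NAME]); // spark/web_logs
      }
    }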

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 8a5de09..07fdcd7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -20,6 +20,14 @@ package org.apache.hadoop.hive.metastore;
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -85,11 +93,13 @@ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
 import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropCatalogEvent;
 import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
@@ -106,10 +116,12 @@ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
@@ -117,6 +129,7 @@ import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
 import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
@@ -161,6 +174,7 @@ import org.apache.thrift.transport.TFramedTransport;
 import org.apache.thrift.transport.TServerSocket;
 import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportFactory;
+import org.iq80.leveldb.DB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -671,21 +685,43 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return txn;
     }
 
-    private static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
+    static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
       Configuration newConf = new Configuration(conf);
       String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
       LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
       return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
     }
 
+    @VisibleForTesting
+    public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException,
+        InvalidOperationException {
+      try {
+        Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME);
+        // Null check because in some test cases we get a null from ms.getCatalog.
+        if (defaultCat != null && defaultCat.getLocationUri().equals("TBD")) {
+          // One-time upgrade fixup: when the new 'hive' catalog is created by an upgrade
+          // script, the script does not know the warehouse location, so we set it here.
+          LOG.info("Setting location of default catalog, as it hasn't been done after upgrade");
+          defaultCat.setLocationUri(wh.getWhRoot().toString());
+          ms.alterCatalog(defaultCat.getName(), defaultCat);
+        }
+
+      } catch (NoSuchObjectException e) {
+        Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString());
+        cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT);
+        ms.createCatalog(cat);
+      }
+    }
+
     private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
       try {
-        ms.getDatabase(DEFAULT_DATABASE_NAME);
+        ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
       } catch (NoSuchObjectException e) {
         Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
           wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
         db.setOwnerName(PUBLIC);
         db.setOwnerType(PrincipalType.ROLE);
+        db.setCatalogName(DEFAULT_CATALOG_NAME);
         ms.createDatabase(db);
       }
     }
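
createDefaultCatalog above is exposed as @VisibleForTesting; a hedged sketch
of how a same-package test might exercise it (the setup lines and the
HMSHandler nesting are assumptions for illustration -- only
createDefaultCatalog, getCatalog, and the "TBD" placeholder come from the
patch itself):

    // Assumes same-package access to the handler class and a configured
    // metastore Configuration.
    void defaultCatalogGetsRealLocation(Configuration conf) throws Exception {
      RawStore ms = HiveMetaStore.HMSHandler.newRawStoreForConf(conf);
      Warehouse wh = new Warehouse(conf);
      HiveMetaStore.HMSHandler.createDefaultCatalog(ms, wh);
      Catalog hive = ms.getCatalog(DEFAULT_CATALOG_NAME);
      assert !"TBD".equals(hive.getLocationUri()); // upgrade placeholder resolved
    }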
@@ -702,7 +738,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
      */
     private void createDefaultDB() throws MetaException {
       try {
-        createDefaultDB_core(getMS());
+        RawStore ms = getMS();
+        createDefaultCatalog(ms, wh);
+        createDefaultDB_core(ms);
       } catch (JDOException e) {
         LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
         try {
@@ -710,7 +748,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         } catch (InvalidObjectException e1) {
           throw new MetaException(e1.getMessage());
         }
-      } catch (InvalidObjectException e) {
+      } catch (InvalidObjectException|InvalidOperationException e) {
         throw new MetaException(e.getMessage());
       }
     }
@@ -853,8 +891,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return startFunction(function, "");
     }
 
-    private void startTableFunction(String function, String db, String tbl) {
-      startFunction(function, " : db=" + db + " tbl=" + tbl);
+    private void startTableFunction(String function, String catName, String db, String tbl) {
+      startFunction(function, " : tbl=" +
+          getCatalogQualifiedTableName(catName, db, tbl));
     }
 
     private void startMultiTableFunction(String function, String db, List<String> tbls) {
@@ -862,14 +901,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       startFunction(function, " : db=" + db + " tbls=" + tableNames);
     }
 
-    private void startPartitionFunction(String function, String db, String tbl,
+    private void startPartitionFunction(String function, String cat, String db, String tbl,
                                         List<String> partVals) {
-      startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]");
+      startFunction(function, " : tbl=" +
+          getCatalogQualifiedTableName(cat, db, tbl) + "[" + join(partVals, ",") + "]");
     }
 
-    private void startPartitionFunction(String function, String db, String tbl,
+    private void startPartitionFunction(String function, String catName, String db, String tbl,
                                         Map<String, String> partName) {
-      startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName);
+      startFunction(function, " : tbl=" +
+          getCatalogQualifiedTableName(catName, db, tbl) + " partition=" + partName);
     }
 
     private void endFunction(String function, boolean successful, Exception e) {
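
The logging helpers above now print catalog-qualified names. The patch imports
Warehouse.getCatalogQualifiedTableName but its body is not part of this file;
a plausible sketch, where the exact "cat.db.tbl" rendering is an assumption:

    // Assumed rendering -- the real method lives in Warehouse and is only
    // imported here.
    static String getCatalogQualifiedTableName(String catName, String dbName, String tblName) {
      return catName + "." + dbName + "." + tblName;
    }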
@@ -920,25 +961,234 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return counters;
     }
 
+    @Override
+    public void create_catalog(CreateCatalogRequest rqst)
+        throws AlreadyExistsException, InvalidObjectException, MetaException {
+      Catalog catalog = rqst.getCatalog();
+      startFunction("create_catalog", ": " + catalog.toString());
+      boolean success = false;
+      Exception ex = null;
+      try {
+        try {
+          getMS().getCatalog(catalog.getName());
+          throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists");
+        } catch (NoSuchObjectException e) {
+          // expected
+        }
+
+        if (!MetaStoreUtils.validateName(catalog.getName(), null)) {
+          throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name");
+        }
+
+        if (catalog.getLocationUri() == null) {
+          throw new InvalidObjectException("You must specify a path for the catalog");
+        }
+
+        RawStore ms = getMS();
+        Path catPath = new Path(catalog.getLocationUri());
+        boolean madeDir = false;
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        try {
+          firePreEvent(new PreCreateCatalogEvent(this, catalog));
+          if (!wh.isDir(catPath)) {
+            if (!wh.mkdirs(catPath)) {
+              throw new MetaException("Unable to create catalog path " + catPath +
+                  ", failed to create catalog " + catalog.getName());
+            }
+            madeDir = true;
+          }
+
+          ms.openTransaction();
+          ms.createCatalog(catalog);
+
+          // Create a default database inside the catalog
+          Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
+              catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
+          db.setCatalogName(catalog.getName());
+          create_database_core(ms, db);
+
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.CREATE_CATALOG,
+                    new CreateCatalogEvent(true, this, catalog));
+          }
+
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) {
+            ms.rollbackTransaction();
+            if (madeDir) {
+              wh.deleteDir(catPath, true);
+            }
+          }
+
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners,
+                EventType.CREATE_CATALOG,
+                new CreateCatalogEvent(success, this, catalog),
+                null,
+                transactionalListenersResponses, ms);
+          }
+        }
+        success = true;
+      } catch (AlreadyExistsException|InvalidObjectException|MetaException e) {
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("create_catalog", success, ex);
+      }
+    }
+
+    @Override
+    public GetCatalogResponse get_catalog(GetCatalogRequest rqst)
+        throws NoSuchObjectException, TException {
+      String catName = rqst.getName();
+      startFunction("get_catalog", ": " + catName);
+      Catalog cat = null;
+      Exception ex = null;
+      try {
+        cat = getMS().getCatalog(catName);
+        firePreEvent(new PreReadCatalogEvent(this, cat));
+        return new GetCatalogResponse(cat);
+      } catch (MetaException|NoSuchObjectException e) {
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_database", cat != null, ex);
+      }
+    }
+
+    @Override
+    public GetCatalogsResponse get_catalogs() throws MetaException {
+      startFunction("get_catalogs");
+
+      List<String> ret = null;
+      Exception ex = null;
+      try {
+        ret = getMS().getCatalogs();
+      } catch (MetaException e) {
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_catalog", ret != null, ex);
+      }
+      return new GetCatalogsResponse(ret == null ? Collections.emptyList() : ret);
+
+    }
+
+    @Override
+    public void drop_catalog(DropCatalogRequest rqst)
+        throws NoSuchObjectException, InvalidOperationException, MetaException {
+      String catName = rqst.getName();
+      startFunction("drop_catalog", ": " + catName);
+      if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName)) {
+        endFunction("drop_catalog", false, null);
+        throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog");
+      }
+
+      boolean success = false;
+      Exception ex = null;
+      try {
+        dropCatalogCore(catName);
+        success = true;
+      } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+        ex = e;
+        throw e;
+      } catch (Exception e) {
+        ex = e;
+        throw newMetaException(e);
+      } finally {
+        endFunction("drop_catalog", success, ex);
+      }
+
+    }
+
+    private void dropCatalogCore(String catName)
+        throws MetaException, NoSuchObjectException, InvalidOperationException {
+      boolean success = false;
+      Catalog cat = null;
+      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+      RawStore ms = getMS();
+      try {
+        ms.openTransaction();
+        cat = ms.getCatalog(catName);
+
+        firePreEvent(new PreDropCatalogEvent(this, cat));
+
+        List<String> allDbs = get_databases(prependNotNullCatToDbName(catName, null));
+        if (allDbs != null && !allDbs.isEmpty()) {
+          // It might just be the default, in which case we can drop that one if it's empty
+          if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) {
+            try {
+              drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false);
+            } catch (InvalidOperationException e) {
+              // This means there are still tables or other objects in the database
+              throw new InvalidOperationException("There are still objects in the default " +
+                  "database for catalog " + catName);
+            } catch (InvalidObjectException|IOException|InvalidInputException e) {
+              MetaException me = new MetaException("Error attempting to drop default database for " +
+                  "catalog " + catName);
+              me.initCause(e);
+              throw me;
+            }
+          } else {
+            throw new InvalidOperationException("There are non-default databases in the catalog " +
+                catName + " so it cannot be dropped.");
+          }
+        }
+
+        ms.dropCatalog(catName);
+        if (!transactionalListeners.isEmpty()) {
+          transactionalListenerResponses =
+              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventType.DROP_CATALOG,
+                  new DropCatalogEvent(true, this, cat));
+        }
+
+        success = ms.commitTransaction();
+      } finally {
+        if (success) {
+          wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false);
+        } else {
+          ms.rollbackTransaction();
+        }
+
+        if (!listeners.isEmpty()) {
+          MetaStoreListenerNotifier.notifyEvent(listeners,
+              EventType.DROP_CATALOG,
+              new DropCatalogEvent(success, this, cat),
+              null,
+              transactionalListenerResponses, ms);
+        }
+      }
+    }
+
+
+    // Assumes that the catalog has already been set.
     private void create_database_core(RawStore ms, final Database db)
         throws AlreadyExistsException, InvalidObjectException, MetaException {
       if (!MetaStoreUtils.validateName(db.getName(), null)) {
         throw new InvalidObjectException(db.getName() + " is not a valid database name");
       }
 
-      if (null == db.getLocationUri()) {
-        db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString());
-      } else {
-        db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
+      Catalog cat = null;
+      try {
+        cat = getMS().getCatalog(db.getCatalogName());
+      } catch (NoSuchObjectException e) {
+        LOG.error("No such catalog " + db.getCatalogName());
+        throw new InvalidObjectException("No such catalog " + db.getCatalogName());
       }
+      Path dbPath = wh.determineDatabasePath(cat, db);
+      db.setLocationUri(dbPath.toString());
 
-      Path dbPath = new Path(db.getLocationUri());
       boolean success = false;
       boolean madeDir = false;
       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
       try {
         firePreEvent(new PreCreateDatabaseEvent(db, this));
         if (!wh.isDir(dbPath)) {
+          LOG.debug("Creating database path " + dbPath);
           if (!wh.mkdirs(dbPath)) {
             throw new MetaException("Unable to create database path " + dbPath +
                 ", failed to create database " + db.getName());
@@ -981,9 +1231,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       startFunction("create_database", ": " + db.toString());
       boolean success = false;
       Exception ex = null;
+      if (!db.isSetCatalogName()) db.setCatalogName(getDefaultCatalog(conf));
       try {
         try {
-          if (null != get_database_core(db.getName())) {
+          if (null != get_database_core(db.getCatalogName(), db.getName())) {
             throw new AlreadyExistsException("Database " + db.getName() + " already exists");
           }
         } catch (NoSuchObjectException e) {
@@ -1022,7 +1273,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Database db = null;
       Exception ex = null;
       try {
-        db = get_database_core(name);
+        String[] parsedDbName = parseDbName(name, conf);
+        db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
         firePreEvent(new PreReadDatabaseEvent(db, this));
       } catch (MetaException|NoSuchObjectException e) {
         ex = e;
@@ -1034,11 +1286,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
-    public Database get_database_core(final String name) throws NoSuchObjectException,
-        MetaException {
+    public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException {
       Database db = null;
       try {
-        db = getMS().getDatabase(name);
+        db = getMS().getDatabase(catName, name);
       } catch (MetaException | NoSuchObjectException e) {
         throw e;
       } catch (Exception e) {
@@ -1050,7 +1301,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public void alter_database(final String dbName, final Database newDB) throws TException {
-      startFunction("alter_database" + dbName);
+      startFunction("alter_database " + dbName);
       boolean success = false;
       Exception ex = null;
       RawStore ms = getMS();
@@ -1062,27 +1313,30 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
       }
 
+      String[] parsedDbName = parseDbName(dbName, conf);
+
       try {
-        oldDB = get_database_core(dbName);
+        oldDB = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
         if (oldDB == null) {
-          throw new MetaException("Could not alter database \"" + dbName + "\". Could not retrieve old definition.");
+          throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] +
+              "\". Could not retrieve old definition.");
         }
         firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
 
         ms.openTransaction();
-        ms.alterDatabase(dbName, newDB);
+        ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB);
 
         if (!transactionalListeners.isEmpty()) {
           transactionalListenersResponses =
-                  MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                          EventType.ALTER_DATABASE,
-                          new AlterDatabaseEvent(oldDB, newDB, true, this));
+              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventType.ALTER_DATABASE,
+                  new AlterDatabaseEvent(oldDB, newDB, true, this));
         }
 
         success = ms.commitTransaction();
-      } catch (Exception e) {
+      } catch (MetaException|NoSuchObjectException e) {
         ex = e;
-        rethrowException(e);
+        throw e;
       } finally {
         if (!success) {
           ms.rollbackTransaction();
@@ -1090,16 +1344,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         if ((null != oldDB) && (!listeners.isEmpty())) {
           MetaStoreListenerNotifier.notifyEvent(listeners,
-                  EventType.ALTER_DATABASE,
-                  new AlterDatabaseEvent(oldDB, newDB, success, this),
-                  null,
-                  transactionalListenersResponses, ms);
+              EventType.ALTER_DATABASE,
+              new AlterDatabaseEvent(oldDB, newDB, success, this),
+              null,
+              transactionalListenersResponses, ms);
         }
         endFunction("alter_database", success, ex);
       }
     }
 
-    private void drop_database_core(RawStore ms,
+    private void drop_database_core(RawStore ms, String catName,
         final String name, final boolean deleteData, final boolean cascade)
         throws NoSuchObjectException, InvalidOperationException, MetaException,
         IOException, InvalidObjectException, InvalidInputException {
@@ -1110,12 +1364,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
-        db = ms.getDatabase(name);
+        db = ms.getDatabase(catName, name);
 
         firePreEvent(new PreDropDatabaseEvent(db, this));
+        String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf);
 
-        List<String> allTables = get_all_tables(db.getName());
-        List<String> allFunctions = get_functions(db.getName(), "*");
+        List<String> allTables = get_all_tables(catPrependedName);
+        List<String> allFunctions = get_functions(catPrependedName, "*");
 
         if (!cascade) {
           if (!allTables.isEmpty()) {
@@ -1138,7 +1393,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // drop any functions before dropping db
         for (String funcName : allFunctions) {
-          drop_function(name, funcName);
+          drop_function(catPrependedName, funcName);
         }
 
         // drop tables before dropping db
@@ -1152,7 +1407,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
           List<Table> tables;
           try {
-            tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex));
+            tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex));
           } catch (UnknownDBException e) {
             throw new MetaException(e.getMessage());
           }
@@ -1178,18 +1433,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
               // For each partition in each table, drop the partitions and get a list of
               // partitions' locations which might need to be deleted
-              partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(),
+              partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(),
                   tablePath, table.getPartitionKeys(), deleteData && !isExternal(table));
 
               // Drop the table but not its data
-              drop_table(name, table.getTableName(), false);
+              drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf),
+                  table.getTableName(), false);
             }
 
             startIndex = endIndex;
           }
         }
 
-        if (ms.dropDatabase(name)) {
+        if (ms.dropDatabase(catName, name)) {
           if (!transactionalListeners.isEmpty()) {
             transactionalListenerResponses =
                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
@@ -1246,45 +1502,46 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
         throws NoSuchObjectException, InvalidOperationException, MetaException {
-
       startFunction("drop_database", ": " + dbName);
-      if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
+      String[] parsedDbName = parseDbName(dbName, conf);
+      if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) &&
+          DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) {
         endFunction("drop_database", false, null);
-        throw new MetaException("Can not drop default database");
+        throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog "
+            + DEFAULT_CATALOG_NAME);
       }
 
       boolean success = false;
       Exception ex = null;
       try {
-        drop_database_core(getMS(), dbName, deleteData, cascade);
+        drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData,
+            cascade);
         success = true;
-      } catch (IOException e) {
+      } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
         ex = e;
-        throw new MetaException(e.getMessage());
+        throw e;
       } catch (Exception e) {
         ex = e;
-        if (e instanceof MetaException) {
-          throw (MetaException) e;
-        } else if (e instanceof InvalidOperationException) {
-          throw (InvalidOperationException) e;
-        } else if (e instanceof NoSuchObjectException) {
-          throw (NoSuchObjectException) e;
-        } else {
-          throw newMetaException(e);
-        }
+        throw newMetaException(e);
       } finally {
         endFunction("drop_database", success, ex);
       }
     }
 
+
     @Override
     public List<String> get_databases(final String pattern) throws MetaException {
       startFunction("get_databases", ": " + pattern);
 
+      String[] parsedDbName = parseDbName(pattern, conf);
       List<String> ret = null;
       Exception ex = null;
       try {
-        ret = getMS().getDatabases(pattern);
+        if (parsedDbName[DB_NAME] == null) {
+          ret = getMS().getAllDatabases(parsedDbName[CAT_NAME]);
+        } else {
+          ret = getMS().getDatabases(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+        }
       } catch (Exception e) {
         ex = e;
         if (e instanceof MetaException) {
@@ -1300,23 +1557,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public List<String> get_all_databases() throws MetaException {
-      startFunction("get_all_databases");
-
-      List<String> ret = null;
-      Exception ex = null;
-      try {
-        ret = getMS().getAllDatabases();
-      } catch (Exception e) {
-        ex = e;
-        if (e instanceof MetaException) {
-          throw (MetaException) e;
-        } else {
-          throw newMetaException(e);
-        }
-      } finally {
-        endFunction("get_all_databases", ret != null, ex);
-      }
-      return ret;
+      return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf));
     }
 
     private void create_type_core(final RawStore ms, final Type type)
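
Note the listing convention used by get_all_databases above: an encoded
pattern whose db part is null means "every database in the catalog". A usage
sketch, assuming a Thrift client handle (prependCatalogToDbName(cat, db, conf)
is the real helper):

    static List<String> listDatabases(ThriftHiveMetastore.Iface client, Configuration conf)
        throws TException {
      // All databases of the default catalog (what get_all_databases now does):
      List<String> all = client.get_databases(
          MetaStoreUtils.prependCatalogToDbName(null, null, conf));
      // Pattern-matched databases within an explicit catalog:
      List<String> web = client.get_databases(
          MetaStoreUtils.prependCatalogToDbName("spark", "web_*", conf));
      return web.isEmpty() ? all : web;
    }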
@@ -1463,18 +1704,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Path tblPath = null;
       boolean success = false, madeDir = false;
       try {
+        if (!tbl.isSetCatName()) tbl.setCatName(getDefaultCatalog(conf));
         firePreEvent(new PreCreateTableEvent(tbl, this));
 
         ms.openTransaction();
 
-        Database db = ms.getDatabase(tbl.getDbName());
+        Database db = ms.getDatabase(tbl.getCatName(), tbl.getDbName());
         if (db == null) {
-          throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist");
+          throw new NoSuchObjectException("The database " +
+              Warehouse.getCatalogQualifiedDbName(tbl.getCatName(), tbl.getDbName()) + " does not exist");
         }
 
         // get_table checks whether database exists, it should be moved here
-        if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) {
-          throw new AlreadyExistsException("Table " + tbl.getTableName()
+        if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) {
+          throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl)
               + " already exists");
         }
 
@@ -1482,7 +1725,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (tbl.getSd().getLocation() == null
               || tbl.getSd().getLocation().isEmpty()) {
             tblPath = wh.getDefaultTablePath(
-                ms.getDatabase(tbl.getDbName()), tbl.getTableName());
+                ms.getDatabase(tbl.getCatName(), tbl.getDbName()), tbl.getTableName());
           } else {
             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
               LOG.warn("Location: " + tbl.getSd().getLocation()
@@ -1550,7 +1793,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               }
             }
           }
-          int notNullConstraintSize =  notNullConstraints.size();
+          int notNullConstraintSize =  0;
           if (notNullConstraints != null) {
             for (int i = 0; i < notNullConstraints.size(); i++) {
               if (notNullConstraints.get(i).getNn_name() == null) {
@@ -1558,7 +1801,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               }
             }
           }
-          int defaultConstraintSize =  defaultConstraints.size();
+          int defaultConstraintSize =  0;
           if (defaultConstraints!= null) {
             for (int i = 0; i < defaultConstraints.size(); i++) {
               if (defaultConstraints.get(i).getDc_name() == null) {
@@ -1649,6 +1892,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         create_table_core(getMS(), tbl, envContext);
         success = true;
       } catch (NoSuchObjectException e) {
+        LOG.warn("create_table_with_environment_context got an exception", e);
         ex = e;
         throw new InvalidObjectException(e.getMessage());
       } catch (Exception e) {
@@ -1704,6 +1948,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public void drop_constraint(DropConstraintRequest req)
         throws MetaException, InvalidObjectException {
+      String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
       String dbName = req.getDbname();
       String tableName = req.getTablename();
       String constraintName = req.getConstraintname();
@@ -1713,9 +1958,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       RawStore ms = getMS();
       try {
         ms.openTransaction();
-        ms.dropConstraint(dbName, tableName, constraintName);
+        ms.dropConstraint(catName, dbName, tableName, constraintName);
         if (transactionalListeners.size() > 0) {
-          DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
+          DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName,
               tableName, constraintName, true, this);
           for (MetaStoreEventListener transactionalListener : transactionalListeners) {
             transactionalListener.onDropConstraint(dropConstraintEvent);
@@ -1737,7 +1982,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           ms.rollbackTransaction();
         } else {
           for (MetaStoreEventListener listener : listeners) {
-            DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
+            DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName,
                 tableName, constraintName, true, this);
             listener.onDropConstraint(dropConstraintEvent);
           }
@@ -2059,15 +2304,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     }
 
-    private boolean is_table_exists(RawStore ms, String dbname, String name)
+    private boolean is_table_exists(RawStore ms, String catName, String dbname, String name)
         throws MetaException {
-      return (ms.getTable(dbname, name) != null);
+      return (ms.getTable(catName, dbname, name) != null);
     }
 
-    private boolean drop_table_core(final RawStore ms, final String dbname, final String name,
-        final boolean deleteData, final EnvironmentContext envContext,
-        final String indexName) throws NoSuchObjectException,
-        MetaException, IOException, InvalidObjectException, InvalidInputException {
+    private boolean drop_table_core(final RawStore ms, final String catName, final String dbname,
+                                    final String name, final boolean deleteData,
+                                    final EnvironmentContext envContext, final String indexName)
+        throws NoSuchObjectException, MetaException, IOException, InvalidObjectException,
+        InvalidInputException {
       boolean success = false;
       boolean isExternal = false;
       Path tblPath = null;
@@ -2078,7 +2324,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         ms.openTransaction();
         // drop any partitions
-        tbl = get_table_core(dbname, name);
+        tbl = get_table_core(catName, dbname, name);
         if (tbl == null) {
           throw new NoSuchObjectException(name + " doesn't exist");
         }
@@ -2101,10 +2347,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         // Drop the partitions and get a list of locations which need to be deleted
-        partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath,
+        partPaths = dropPartitionsAndGetLocations(ms, catName, dbname, name, tblPath,
             tbl.getPartitionKeys(), deleteData && !isExternal);
-        if (!ms.dropTable(dbname, name)) {
-          String tableName = dbname + "." + name;
+
+        // Drop any constraints on the table
+        ms.dropConstraint(catName, dbname, name, null, true);
+
+        if (!ms.dropTable(catName, dbname, name)) {
+          String tableName = getCatalogQualifiedTableName(catName, dbname, name);
           throw new MetaException(indexName == null ? "Unable to drop table " + tableName:
               "Unable to drop index table " + tableName + " for index " + indexName);
         } else {
@@ -2221,7 +2471,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
      * @throws InvalidObjectException
      * @throws NoSuchObjectException
      */
-    private List<Path> dropPartitionsAndGetLocations(RawStore ms, String dbName,
+    private List<Path> dropPartitionsAndGetLocations(RawStore ms, String catName, String dbName,
       String tableName, Path tablePath, List<FieldSchema> partitionKeys, boolean checkLocation)
       throws MetaException, IOException, NoSuchObjectException, InvalidObjectException,
       InvalidInputException {
@@ -2232,12 +2482,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         tableDnsPath = wh.getDnsPath(tablePath);
       }
       List<Path> partPaths = new ArrayList<>();
-      Table tbl = ms.getTable(dbName, tableName);
+      Table tbl = ms.getTable(catName, dbName, tableName);
 
       // call dropPartition on each of the table's partitions to follow the
       // procedure for cleanly dropping partitions.
       while (true) {
-        List<Partition> partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize);
+        List<Partition> partsToDelete = ms.getPartitions(catName, dbName, tableName, partitionBatchSize);
         if (partsToDelete == null || partsToDelete.isEmpty()) {
           break;
         }
@@ -2269,7 +2519,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             }
           }
         }
-        ms.dropPartitions(dbName, tableName, partNames);
+        ms.dropPartitions(catName, dbName, tableName, partNames);
       }
 
       return partPaths;
@@ -2285,12 +2535,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public void drop_table_with_environment_context(final String dbname, final String name,
         final boolean deleteData, final EnvironmentContext envContext)
         throws NoSuchObjectException, MetaException {
-      startTableFunction("drop_table", dbname, name);
+      String[] parsedDbName = parseDbName(dbname, conf);
+      startTableFunction("drop_table", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name);
 
       boolean success = false;
       Exception ex = null;
       try {
-        success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null);
+        success = drop_table_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name,
+            deleteData, envContext, null);
       } catch (IOException e) {
         ex = e;
         throw new MetaException(e.getMessage());
@@ -2323,6 +2575,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private void alterPartitionForTruncate(final RawStore ms,
+                                           final String catName,
                                            final String dbName,
                                            final String tableName,
                                            final Table table,
@@ -2342,18 +2595,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                 new AlterPartitionEvent(partition, partition, table, true, true, this));
       }
 
-      alterHandler.alterPartition(ms, wh, dbName, tableName, null, partition, environmentContext, this);
+      alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition,
+          environmentContext, this);
     }
 
     private void alterTableStatsForTruncate(final RawStore ms,
+                                            final String catName,
                                             final String dbName,
                                             final String tableName,
                                             final Table table,
                                             final List<String> partNames) throws Exception {
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
-            alterPartitionForTruncate(ms, dbName, tableName, table, partition);
+          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
+            alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition);
           }
         } else {
           EnvironmentContext environmentContext = new EnvironmentContext();
@@ -2371,17 +2626,18 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                     new AlterTableEvent(table, table, true, true, this));
           }
 
-          alterHandler.alterTable(ms, wh, dbName, tableName, table, environmentContext, this);
+          alterHandler.alterTable(ms, wh, catName, dbName, tableName, table, environmentContext, this);
         }
       } else {
-        for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) {
-          alterPartitionForTruncate(ms, dbName, tableName, table, partition);
+        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
+          alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition);
         }
       }
       return;
     }
 
     private List<Path> getLocationsForTruncate(final RawStore ms,
+                                               final String catName,
                                                final String dbName,
                                                final String tableName,
                                                final Table table,
@@ -2389,14 +2645,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       List<Path> locations = new ArrayList<>();
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
+          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
             locations.add(new Path(partition.getSd().getLocation()));
           }
         } else {
           locations.add(new Path(table.getSd().getLocation()));
         }
       } else {
-        for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) {
+        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
           locations.add(new Path(partition.getSd().getLocation()));
         }
       }
@@ -2413,11 +2669,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public void truncate_table(final String dbName, final String tableName, List<String> partNames)
       throws NoSuchObjectException, MetaException {
       try {
-        Table tbl = get_table_core(dbName, tableName);
+        String[] parsedDbName = parseDbName(dbName, conf);
+        Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
         boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
 
         // This is not transactional
-        for (Path location : getLocationsForTruncate(getMS(), dbName, tableName, tbl, partNames)) {
+        for (Path location : getLocationsForTruncate(getMS(), parsedDbName[CAT_NAME],
+            parsedDbName[DB_NAME], tableName, tbl, partNames)) {
           FileSystem fs = location.getFileSystem(getConf());
           if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location) &&
               !FileUtils.pathHasSnapshotSubDir(location, fs)) {
@@ -2439,7 +2697,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         // Alter the table/partition stats and also notify truncate table event
-        alterTableStatsForTruncate(getMS(), dbName, tableName, tbl, partNames);
+        alterTableStatsForTruncate(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+            tableName, tbl, partNames);
       } catch (IOException e) {
         throw new MetaException(e.getMessage());
       } catch (Exception e) {
@@ -2468,17 +2727,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Deprecated
     public Table get_table(final String dbname, final String name) throws MetaException,
         NoSuchObjectException {
-      return getTableInternal(dbname, name, null);
+      String[] parsedDbName = parseDbName(dbname, conf);
+      return getTableInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null);
     }
 
     @Override
     public GetTableResult get_table_req(GetTableRequest req) throws MetaException,
         NoSuchObjectException {
-      return new GetTableResult(getTableInternal(req.getDbName(), req.getTblName(),
+      String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+      return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
           req.getCapabilities()));
     }
 
-    private Table getTableInternal(String dbname, String name,
+    private Table getTableInternal(String catName, String dbname, String name,
         ClientCapabilities capabilities) throws MetaException, NoSuchObjectException {
       if (isInTest) {
         assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
@@ -2486,10 +2747,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
 
       Table t = null;
-      startTableFunction("get_table", dbname, name);
+      startTableFunction("get_table", catName, dbname, name);
       Exception ex = null;
       try {
-        t = get_table_core(dbname, name);
+        t = get_table_core(catName, dbname, name);
         if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
           assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
               "insert-only tables", "get_table_req");
@@ -2509,10 +2770,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public List<TableMeta> get_table_meta(String dbnames, String tblNames, List<String> tblTypes)
         throws MetaException, NoSuchObjectException {
       List<TableMeta> t = null;
-      startTableFunction("get_table_metas", dbnames, tblNames);
+      String[] parsedDbName = parseDbName(dbnames, conf);
+      startTableFunction("get_table_metas", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames);
       Exception ex = null;
       try {
-        t = getMS().getTableMeta(dbnames, tblNames, tblTypes);
+        t = getMS().getTableMeta(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames, tblTypes);
       } catch (Exception e) {
         ex = e;
         throw newMetaException(e);
@@ -2523,14 +2785,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
-    public Table get_table_core(final String dbname, final String name) throws MetaException,
-        NoSuchObjectException {
+    public Table get_table_core(final String catName, final String dbname, final String name)
+        throws MetaException, NoSuchObjectException {
       Table t = null;
       try {
-        t = getMS().getTable(dbname, name);
+        t = getMS().getTable(catName, dbname, name);
         if (t == null) {
-          throw new NoSuchObjectException(dbname + "." + name
-              + " table not found");
+          throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbname, name) +
+            " table not found");
         }
       } catch (Exception e) {
         throwMetaException(e);
@@ -2558,17 +2820,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Deprecated
     public List<Table> get_table_objects_by_name(final String dbName, final List<String> tableNames)
         throws MetaException, InvalidOperationException, UnknownDBException {
-      return getTableObjectsInternal(dbName, tableNames, null);
+      String[] parsedDbName = parseDbName(dbName, conf);
+      return getTableObjectsInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableNames, null);
     }
 
     @Override
     public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws TException {
-      return new GetTablesResult(getTableObjectsInternal(
+      String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+      return new GetTablesResult(getTableObjectsInternal(catName,
           req.getDbName(), req.getTblNames(), req.getCapabilities()));
     }
 
-    private List<Table> getTableObjectsInternal(
-        String dbName, List<String> tableNames, ClientCapabilities capabilities)
+    private List<Table> getTableObjectsInternal(String catName, String dbName,
+                                                List<String> tableNames,
+                                                ClientCapabilities capabilities)
             throws MetaException, InvalidOperationException, UnknownDBException {
       if (isInTest) {
         assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
@@ -2606,7 +2871,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         // Oracle cannot have over 1000 expressions in a in-list
         while (startIndex < distinctTableNames.size()) {
           int endIndex = Math.min(startIndex + tableBatchSize, distinctTableNames.size());
-          tables.addAll(ms.getTableObjectsByName(dbName, distinctTableNames.subList(startIndex, endIndex)));
+          tables.addAll(ms.getTableObjectsByName(catName, dbName, distinctTableNames.subList(
+              startIndex, endIndex)));
           startIndex = endIndex;
         }
         for (Table t : tables) {
@@ -2638,8 +2904,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
-    public void update_creation_metadata(final String dbName, final String tableName, CreationMetadata cm) throws MetaException {
-      getMS().updateCreationMetadata(dbName, tableName, cm);
+    public void update_creation_metadata(String catName, final String dbName, final String tableName, CreationMetadata cm) throws MetaException {
+      getMS().updateCreationMetadata(catName, dbName, tableName, cm);
     }
 
     private void assertClientHasCapability(ClientCapabilities client,
@@ -2668,14 +2934,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       List<String> tables = null;
       startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter);
       Exception ex = null;
+      String[] parsedDbName = parseDbName(dbName, conf);
       try {
-        if (dbName == null || dbName.isEmpty()) {
+        if (parsedDbName[CAT_NAME] == null || parsedDbName[CAT_NAME].isEmpty() ||
+            parsedDbName[DB_NAME] == null || parsedDbName[DB_NAME].isEmpty()) {
           throw new UnknownDBException("DB name is null or empty");
         }
         if (filter == null) {
           throw new InvalidOperationException(filter + " cannot apply null filter");
         }
-        tables = getMS().listTableNamesByFilter(dbName, filter, maxTables);
+        tables = getMS().listTableNamesByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], filter, maxTables);
       } catch (Exception e) {
         ex = e;
         if (e instanceof MetaException) {
@@ -2693,9 +2961,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return tables;
     }
 
-    private Partition append_partition_common(RawStore ms, String dbName, String tableName,
-        List<String> part_vals, EnvironmentContext envContext) throws InvalidObjectException,
-        AlreadyExistsException, MetaException {
+    private Partition append_partition_common(RawStore ms, String catName, String dbName,
+                                              String tableName, List<String> part_vals,
+                                              EnvironmentContext envContext)
+        throws InvalidObjectException, AlreadyExistsException, MetaException {
 
       Partition part = new Partition();
       boolean success = false, madeDir = false;
@@ -2704,13 +2973,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
+        part.setCatName(catName);
         part.setDbName(dbName);
         part.setTableName(tableName);
         part.setValues(part_vals);
 
         MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
 
-        tbl = ms.getTable(part.getDbName(), part.getTableName());
+        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to add partition because table or database do not exist");
@@ -2729,7 +2999,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         Partition old_part;
         try {
-          old_part = ms.getPartition(part.getDbName(), part
+          old_part = ms.getPartition(part.getCatName(), part.getDbName(), part
               .getTableName(), part.getValues());
         } catch (NoSuchObjectException e) {
           // this means there is no existing partition
@@ -2810,10 +3080,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public Partition append_partition_with_environment_context(final String dbName,
         final String tableName, final List<String> part_vals, final EnvironmentContext envContext)
         throws InvalidObjectException, AlreadyExistsException, MetaException {
-      if (part_vals == null) {
+      if (part_vals == null || part_vals.isEmpty()) {
         throw new MetaException("The partition values must not be null.");
       }
-      startPartitionFunction("append_partition", dbName, tableName, part_vals);
+      String[] parsedDbName = parseDbName(dbName, conf);
+      startPartitionFunction("append_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals);
       if (LOG.isDebugEnabled()) {
         for (String part : part_vals) {
           LOG.debug(part);
@@ -2823,7 +3094,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Partition ret = null;
       Exception ex = null;
       try {
-        ret = append_partition_common(getMS(), dbName, tableName, part_vals, envContext);
+        ret = append_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals, envContext);
       } catch (Exception e) {
         ex = e;
         if (e instanceof MetaException) {
@@ -2930,7 +3201,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     }
 
-    private List<Partition> add_partitions_core(final RawStore ms,
+    private List<Partition> add_partitions_core(final RawStore ms, String catName,
         String dbName, String tblName, List<Partition> parts, final boolean ifNotExists)
         throws TException {
       logInfo("add_partitions");
@@ -2944,10 +3215,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       try {
         ms.openTransaction();
-        tbl = ms.getTable(dbName, tblName);
+        tbl = ms.getTable(catName, dbName, tblName);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
-              + "database or table " + dbName + "." + tblName + " does not exist");
+              + getCatalogQualifiedTableName(catName, dbName, tblName) +
+              " does not exist");
         }
 
         if (!parts.isEmpty()) {
@@ -2958,8 +3230,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final Table table = tbl;
         for (final Partition part : parts) {
           if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
-            throw new MetaException("Partition does not belong to target table "
-                + dbName + "." + tblName + ": " + part);
+            throw new MetaException("Partition does not belong to target table " +
+                getCatalogQualifiedTableName(catName, dbName, tblName) + ": " +
+                    part);
           }
 
           boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
@@ -3018,7 +3291,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (!newParts.isEmpty()) {
-          success = ms.addPartitions(dbName, tblName, newParts);
+          success = ms.addPartitions(catName, dbName, tblName, newParts);
         } else {
           success = true;
         }
@@ -3080,7 +3353,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         return result;
       }
       try {
-        List<Partition> parts = add_partitions_core(getMS(), request.getDbName(),
+        if (!request.isSetCatName()) request.setCatName(getDefaultCatalog(conf));
+        // Make sure all of the partitions have the catalog set as well
+        request.getParts().forEach(p -> {
+          if (!p.isSetCatName()) p.setCatName(getDefaultCatalog(conf));
+        });
+        List<Partition> parts = add_partitions_core(getMS(), request.getCatName(), request.getDbName(),
             request.getTblName(), request.getParts(), request.isIfNotExists());
         if (request.isNeedResult()) {
           result.setPartitions(parts);
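
The two defaulting steps above (set catName on the request, then on every embedded partition) recur at each entry point in this file, because pre-catalog clients never populate the new thrift field. A hedged sketch of the common shape; ensureCatalogSet is an illustrative name, not a helper this patch actually adds:

    // Illustrative only: the patch inlines this logic at each call site
    // rather than sharing a helper.
    private void ensureCatalogSet(List<Partition> parts) {
      String defaultCat = getDefaultCatalog(conf);   // server-wide default catalog
      for (Partition p : parts) {
        if (!p.isSetCatName()) {                     // optional thrift field
          p.setCatName(defaultCat);
        }
      }
    }
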
@@ -3105,7 +3383,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Exception ex = null;
       try {
         // Old API assumed all partitions belong to the same table; keep the same assumption
-        ret = add_partitions_core(getMS(), parts.get(0).getDbName(),
+        if (!parts.get(0).isSetCatName()) {
+          String defaultCat = getDefaultCatalog(conf);
+          for (Partition p : parts) p.setCatName(defaultCat);
+        }
+        ret = add_partitions_core(getMS(), parts.get(0).getCatName(), parts.get(0).getDbName(),
             parts.get(0).getTableName(), parts, false).size();
         assert ret == parts.size();
       } catch (Exception e) {
@@ -3137,12 +3419,21 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       String dbName = partSpecs.get(0).getDbName();
       String tableName = partSpecs.get(0).getTableName();
+      // If the catalog name isn't set, we need to go through and set it.
+      String catName;
+      if (!partSpecs.get(0).isSetCatName()) {
+        catName = getDefaultCatalog(conf);
+        partSpecs.forEach(ps -> ps.setCatName(catName));
+      } else {
+        catName = partSpecs.get(0).getCatName();
+      }
 
-      return add_partitions_pspec_core(getMS(), dbName, tableName, partSpecs, false);
+      return add_partitions_pspec_core(getMS(), catName, dbName, tableName, partSpecs, false);
     }
 
-    private int add_partitions_pspec_core(
-        RawStore ms, String dbName, String tblName, List<PartitionSpec> partSpecs, boolean ifNotExists)
+    private int add_partitions_pspec_core(RawStore ms, String catName, String dbName,
+                                          String tblName, List<PartitionSpec> partSpecs,
+                                          boolean ifNotExists)
         throws TException {
       boolean success = false;
       // Ensures that the list doesn't have dups, and keeps track of directories we have created.
@@ -3154,7 +3445,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
-        tbl = ms.getTable(dbName, tblName);
+        tbl = ms.getTable(catName, dbName, tblName);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
               + "database or table " + dbName + "." + tblName + " does not exist");
@@ -3166,7 +3457,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         while(partitionIterator.hasNext()) {
           final Partition part = partitionIterator.getCurrent();
 
-          if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
+          if (!part.getTableName().equalsIgnoreCase(tblName) || !part.getDbName().equalsIgnoreCase(dbName)) {
             throw new MetaException("Partition does not belong to target table "
                 + dbName + "." + tblName + ": " + part);
           }
@@ -3222,7 +3513,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           throw new MetaException(e.getMessage());
         }
 
-        success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists);
+        success = ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists);
         //setting success to false to make sure that if the listener fails, rollback happens.
         success = false;
 
@@ -3260,7 +3551,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         RawStore ms, Partition part, boolean ifNotExists) throws TException {
       MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
           partitionValidationPattern);
-      boolean doesExist = ms.doesPartitionExist(
+      boolean doesExist = ms.doesPartitionExist(part.getCatName(),
           part.getDbName(), part.getTableName(), part.getValues());
       if (doesExist && !ifNotExists) {
         throw new AlreadyExistsException("Partition already exists: " + part);
@@ -3358,9 +3649,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean success = false;
       Table tbl = null;
       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+      if (!part.isSetCatName()) part.setCatName(getDefaultCatalog(conf));
       try {
         ms.openTransaction();
-        tbl = ms.getTable(part.getDbName(), part.getTableName());
+        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to add partition because table or database do not exist");
@@ -3424,7 +3716,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throws InvalidObjectException, AlreadyExistsException,
         MetaException {
       startTableFunction("add_partition",
-          part.getDbName(), part.getTableName());
+          part.getCatName(), part.getDbName(), part.getTableName());
       Partition ret = null;
       Exception ex = null;
       try {
@@ -3451,6 +3743,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         String sourceDbName, String sourceTableName, String destDbName,
         String destTableName) throws TException {
       exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName);
+      // TODO: it would make more sense to return the first element of the
+      // list returned by the previous call.
       return new Partition();
     }
 
@@ -3458,24 +3752,36 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
         String sourceDbName, String sourceTableName, String destDbName,
         String destTableName) throws TException {
-      if (partitionSpecs == null || sourceDbName == null || sourceTableName == null
-          || destDbName == null || destTableName == null) {
+      String[] parsedDestDbName = parseDbName(destDbName, conf);
+      String[] parsedSourceDbName = parseDbName(sourceDbName, conf);
+      // No need to check catalog for null as parseDbName() will never return null for the catalog.
+      if (partitionSpecs == null || parsedSourceDbName[DB_NAME] == null || sourceTableName == null
+          || parsedDestDbName[DB_NAME] == null || destTableName == null) {
         throw new MetaException("The DB and table name for the source and destination tables,"
             + " and the partition specs must not be null.");
       }
+      if (!parsedDestDbName[CAT_NAME].equals(parsedSourceDbName[CAT_NAME])) {
+        throw new MetaException("You cannot move a partition across catalogs");
+      }
+
       boolean success = false;
       boolean pathCreated = false;
       RawStore ms = getMS();
       ms.openTransaction();
-      Table destinationTable = ms.getTable(destDbName, destTableName);
+
+      Table destinationTable =
+          ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName);
       if (destinationTable == null) {
-        throw new MetaException(
-            "The destination table " + destDbName + "." + destTableName + " not found");
+        throw new MetaException( "The destination table " +
+            getCatalogQualifiedTableName(parsedDestDbName[CAT_NAME],
+                parsedDestDbName[DB_NAME], destTableName) + " not found");
       }
-      Table sourceTable = ms.getTable(sourceDbName, sourceTableName);
+      Table sourceTable =
+          ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName);
       if (sourceTable == null) {
-        throw new MetaException(
-            "The source table " + sourceDbName + "." + sourceTableName + " not found");
+        throw new MetaException("The source table " +
+            getCatalogQualifiedTableName(parsedSourceDbName[CAT_NAME],
+                parsedSourceDbName[DB_NAME], sourceTableName) + " not found");
       }
       List<String> partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(),
           partitionSpecs);
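
For context, a hypothetical client-side invocation of this endpoint; the database and table names are illustrative, and per the guard above both names must resolve to the same catalog:

    // Assumes the "@<catalog>#<database>" name encoding; crossing catalogs
    // raises MetaException("You cannot move a partition across catalogs").
    Map<String, String> partitionSpecs = new HashMap<>();
    partitionSpecs.put("ds", "2018-03-31");
    List<Partition> moved = handler.exchange_partitions(partitionSpecs,
        "@hive#staging_db", "web_logs", "@hive#prod_db", "web_logs");
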
@@ -3490,6 +3796,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
         i++;
       }
+      // Pass the unparsed DB name here, as get_partitions_ps expects to parse it.
       List<Partition> partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName,
           partVals, (short)-1);
       if (partitionsToExchange == null || partitionsToExchange.isEmpty()) {
@@ -3515,8 +3822,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           Lists.newArrayListWithCapacity(partitionsToExchange.size());
 
       // Check if any of the partitions already exists in destTable.
-      List<String> destPartitionNames =
-          ms.listPartitionNames(destDbName, destTableName, (short) -1);
+      List<String> destPartitionNames = ms.listPartitionNames(parsedDestDbName[CAT_NAME],
+          parsedDestDbName[DB_NAME], destTableName, (short) -1);
       if (destPartitionNames != null && !destPartitionNames.isEmpty()) {
         for (Partition partition : partitionsToExchange) {
           String partToExchangeName =
@@ -3531,14 +3838,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         for (Partition partition: partitionsToExchange) {
           Partition destPartition = new Partition(partition);
-          destPartition.setDbName(destDbName);
+          destPartition.setDbName(parsedDestDbName[DB_NAME]);
           destPartition.setTableName(destinationTable.getTableName());
           Path destPartitionPath = new Path(destinationTable.getSd().getLocation(),
               Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()));
           destPartition.getSd().setLocation(destPartitionPath.toString());
           ms.addPartition(destPartition);
           destPartitions.add(destPartition);
-          ms.dropPartition(partition.getDbName(), sourceTable.getTableName(),
-            partition.getValues());
+          ms.dropPartition(parsedSourceDbName[CAT_NAME], partition.getDbName(),
+              sourceTable.getTableName(), partition.getValues());
         }
         Path destParentPath = destPath.getParent();
@@ -3610,9 +3917,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     }
 
-    private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name,
-      List<String> part_vals, final boolean deleteData, final EnvironmentContext envContext)
-      throws MetaException, NoSuchObjectException, IOException, InvalidObjectException,
+    private boolean drop_partition_common(RawStore ms, String catName, String db_name,
+                                          String tbl_name, List<String> part_vals,
+                                          final boolean deleteData, final EnvironmentContext envContext)
+        throws MetaException, NoSuchObjectException, IOException, InvalidObjectException,
       InvalidInputException {
       boolean success = false;
       Path partPath = null;
@@ -3636,8 +3944,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       try {
         ms.openTransaction();
-        part = ms.getPartition(db_name, tbl_name, part_vals);
-        tbl = get_table_core(db_name, tbl_name);
+        part = ms.getPartition(catName, db_name, tbl_name, part_vals);
+        tbl = get_table_core(catName, db_name, tbl_name);
         isExternalTbl = isExternal(tbl);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
         mustPurge = isMustPurge(envContext, tbl);
@@ -3658,7 +3966,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           verifyIsWritablePath(partPath);
         }
 
-        if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
+        if (!ms.dropPartition(catName, db_name, tbl_name, part_vals)) {
           throw new MetaException("Unable to drop partition");
         } else {
           if (!transactionalListeners.isEmpty()) {
@@ -3749,6 +4057,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         DropPartitionsRequest request) throws TException {
       RawStore ms = getMS();
       String dbName = request.getDbName(), tblName = request.getTblName();
+      String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf);
       boolean ifExists = request.isSetIfExists() && request.isIfExists();
       boolean deleteData = request.isSetDeleteData() && request.isDeleteData();
       boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection();
@@ -3768,7 +4077,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
         // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
-        tbl = get_table_core(dbName, tblName);
+        tbl = get_table_core(catName, dbName, tblName);
         isExternal(tbl);
         mustPurge = isMustPurge(envContext, tbl);
         int minCount = 0;
@@ -3781,7 +4090,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             ++minCount; // At least one partition per expression, if not ifExists
             List<Partition> result = new ArrayList<>();
             boolean hasUnknown = ms.getPartitionsByExpr(
-                dbName, tblName, expr.getExpr(), null, (short)-1, result);
+                catName, dbName, tblName, expr.getExpr(), null, (short)-1, result);
             if (hasUnknown) {
               // Expr is built by DDLSA, it should only contain part cols and simple ops
               throw new MetaException("Unexpected unknown partitions to drop");
@@ -3802,7 +4111,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         } else if (spec.isSetNames()) {
           partNames = spec.getNames();
           minCount = partNames.size();
-          parts = ms.getPartitionsByNames(dbName, tblName, partNames);
+          parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames);
         } else {
           throw new MetaException("Partition spec is not set");
         }
@@ -3843,7 +4152,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
 
-        ms.dropPartitions(dbName, tblName, partNames);
+        ms.dropPartitions(catName, dbName, tblName, partNames);
         if (parts != null && !transactionalListeners.isEmpty()) {
           for (Partition part : parts) {
             transactionalListenerResponses.add(
@@ -3921,13 +4230,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String tbl_name, final List<String> part_vals, final boolean deleteData,
         final EnvironmentContext envContext)
         throws TException {
-      startPartitionFunction("drop_partition", db_name, tbl_name, part_vals);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startPartitionFunction("drop_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+          tbl_name, part_vals);
       LOG.info("Partition values:" + part_vals);
 
       boolean ret = false;
       Exception ex = null;
       try {
-        ret = drop_partition_common(getMS(), db_name, tbl_name, part_vals, deleteData, envContext);
+        ret = drop_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+            tbl_name, part_vals, deleteData, envContext);
       } catch (IOException e) {
         ex = e;
         throw new MetaException(e.getMessage());
@@ -3944,13 +4256,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public Partition get_partition(final String db_name, final String tbl_name,
         final List<String> part_vals) throws MetaException, NoSuchObjectException {
-      startPartitionFunction("get_partition", db_name, tbl_name, part_vals);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startPartitionFunction("get_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+          tbl_name, part_vals);
 
       Partition ret = null;
       Exception ex = null;
       try {
-        fireReadTablePreEvent(db_name, tbl_name);
-        ret = getMS().getPartition(db_name, tbl_name, part_vals);
+        fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+        ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals);
       } catch (Exception e) {
         ex = e;
         throwMetaException(e);
@@ -3963,19 +4277,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     /**
      * Fire a pre-event for read table operation, if there are any
      * pre-event listeners registered
-     *
-     * @param dbName
-     * @param tblName
-     * @throws MetaException
-     * @throws NoSuchObjectException
      */
-    private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException {
+    private void fireReadTablePreEvent(String catName, String dbName, String tblName)
+        throws MetaException, NoSuchObjectException {
       if(preListeners.size() > 0) {
         // do this only if there is a pre event listener registered (avoid unnecessary
         // metastore api call)
-        Table t = getMS().getTable(dbName, tblName);
+        Table t = getMS().getTable(catName, dbName, tblName);
         if (t == null) {
-          throw new NoSuchObjectException(dbName + "." + tblName
+          throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tblName)
               + " table not found");
         }
         firePreEvent(new PreReadTableEvent(t, this));
@@ -3987,14 +4297,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String tbl_name, final List<String> part_vals,
         final String user_name, final List<String> group_names)
         throws TException {
-      startPartitionFunction("get_partition_with_auth", db_name, tbl_name,
-          part_vals);
-      fireReadTablePreEvent(db_name, tbl_name);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME],
+          parsedDbName[DB_NAME], tbl_name, part_vals);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
       Partition ret = null;
       Exception ex = null;
       try {
-        ret = getMS().getPartitionWithAuth(db_name, tbl_name, part_vals,
-            user_name, group_names);
+        ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+            tbl_name, part_vals, user_name, group_names);
       } catch (InvalidObjectException e) {
         ex = e;
         throw new NoSuchObjectException(e.getMessage());
@@ -4010,13 +4321,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public List<Partition> get_partitions(final String db_name, final String tbl_name,
         final short max_parts) throws NoSuchObjectException, MetaException {
-      startTableFunction("get_partitions", db_name, tbl_name);
-      fireReadTablePreEvent(db_name, tbl_name);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startTableFunction("get_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
       List<Partition> ret = null;
       Exception ex = null;
       try {
-        checkLimitNumberOfPartitionsByFilter(db_name, tbl_name, NO_FILTER_STRING, max_parts);
-        ret = getMS().getPartitions(db_name, tbl_name, max_parts);
+        checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+            tbl_name, NO_FILTER_STRING, max_parts);
+        ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
+            max_parts);
       } catch (Exception e) {
         ex = e;
         throwMetaException(e);
@@ -4031,14 +4345,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public List<Partition> get_partitions_with_auth(final String dbName,
         final String tblName, final short maxParts, final String userName,
         final List<String> groupNames) throws TException {
-      startTableFunction("get_partitions_with_auth", dbName, tblName);
+      String[] parsedDbName = parseDbName(dbName, conf);
+      startTableFunction("get_partitions_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName);
 
       List<Partition> ret = null;
       Exception ex = null;
       try {
-        checkLimitNumberOfPartitionsByFilter(dbName, tblName, NO_FILTER_STRING, maxParts);
-        ret = getMS().getPartitionsWithAuth(dbName, tblName, maxParts,
-            userName, groupNames);
+        checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+            tblName, NO_FILTER_STRING, maxParts);
+        ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName,
+            maxParts, userName, groupNames);
       } catch (InvalidObjectException e) {
         ex = e;
         throw new NoSuchObjectException(e.getMessage());
@@ -4052,15 +4368,21 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     }
 
-    private void checkLimitNumberOfPartitionsByFilter(String dbName, String tblName, String filterString, int maxParts) throws TException {
+    private void checkLimitNumberOfPartitionsByFilter(String catName, String dbName,
+                                                      String tblName, String filterString,
+                                                      int maxParts) throws TException {
       if (isPartitionLimitEnabled()) {
-        checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(dbName, tblName, filterString), maxParts);
+        checkLimitNumberOfPartitions(tblName,
+            get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf),
+                tblName, filterString), maxParts);
       }
     }
 
-    private void checkLimitNumberOfPartitionsByExpr(String dbName, String tblName, byte[] filterExpr, int maxParts) throws TException {
+    private void checkLimitNumberOfPartitionsByExpr(String catName, String dbName, String tblName,
+                                                    byte[] filterExpr, int maxParts)
+        throws TException {
       if (isPartitionLimitEnabled()) {
-        checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(dbName, tblName, filterExpr), maxParts);
+        checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(catName, dbName, tblName,
+            filterExpr), maxParts);
       }
     }
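
Note that checkLimitNumberOfPartitionsByFilter() re-packs the already-parsed pair with prependCatalogToDbName() because get_num_partitions_by_filter() is itself a thrift-facing method that will parse the name again. A sketch of the assumed inverse encoding (the shipped version in MetaStoreUtils also tolerates null arguments):

    // Sketch of the assumed inverse of parseDbName().
    static String prependCatalogToDbNameSketch(String catName, String dbName) {
      return "@" + catName + "#" + dbName;   // e.g. "@hive#default"
    }
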
 
@@ -4085,15 +4407,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public List<PartitionSpec> get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts)
       throws NoSuchObjectException, MetaException  {
 
-      String dbName = db_name.toLowerCase();
+      String[] parsedDbName = parseDbName(db_name, conf);
       String tableName = tbl_name.toLowerCase();
 
-      startTableFunction("get_partitions_pspec", dbName, tableName);
+      startTableFunction("get_partitions_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
 
       List<PartitionSpec> partitionSpecs = null;
       try {
-        Table table = get_table_core(dbName, tableName);
-        List<Partition> partitions = get_partitions(dbName, tableName, (short) max_parts);
+        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        // get_partitions will parse out the catalog and db names itself
+        List<Partition> partitions = get_partitions(db_name, tableName, (short) max_parts);
 
         if (is_partition_spec_grouping_enabled(table)) {
           partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions);
@@ -4101,7 +4424,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         else {
           PartitionSpec pSpec = new PartitionSpec();
           pSpec.setPartitionList(new PartitionListComposingSpec(partitions));
-          pSpec.setDbName(dbName);
+          pSpec.setCatName(parsedDbName[CAT_NAME]);
+          pSpec.setDbName(parsedDbName[DB_NAME]);
           pSpec.setTableName(tableName);
           pSpec.setRootPath(table.getSd().getLocation());
           partitionSpecs = Arrays.asList(pSpec);
@@ -4241,12 +4565,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public List<String> get_partition_names(final String db_name, final String tbl_name,
         final short max_parts) throws NoSuchObjectException, MetaException {
-      startTableFunction("get_partition_names", db_name, tbl_name);
-      fireReadTablePreEvent(db_name, tbl_name);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
       List<String> ret = null;
       Exception ex = null;
       try {
-        ret = getMS().listPartitionNames(db_name, tbl_name, max_parts);
+        ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
+            max_parts);
       } catch (Exception e) {
         ex = e;
         if (e instanceof MetaException) {
@@ -4262,11 +4588,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException {
+      String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf);
       String dbName = request.getDbName();
       String tblName = request.getTblName();
+      // The following two lines appear to be no-ops, but without them the
+      // subsequent call to listPartitionValues fails.
       List<FieldSchema> partCols = new ArrayList<FieldSchema>();
       partCols.add(request.getPartitionKeys().get(0));
-      return getMS().listPartitionValues(dbName, tblName, request.getPartitionKeys(),
+      return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(),
           request.isApplyDistinct(), request.getFilter(), request.isAscending(),
           request.getPartitionOrder(), request.getMaxParts());
     }
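
A hypothetical construction of the request consumed above, using only the thrift accessors this method touches; the names and values are illustrative:

    // catName is optional; when it is unset, the isSetCatName() check above
    // substitutes the default catalog.
    PartitionValuesRequest req = new PartitionValuesRequest("web_db", "access_logs",
        Arrays.asList(new FieldSchema("ds", "string", null)));
    req.setCatName("hive");
    req.setApplyDistinct(true);
    req.setMaxParts(100);
    PartitionValuesResponse resp = handler.get_partition_values(req);
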
@@ -4283,8 +4612,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String tableName, final Partition newPartition,
         final EnvironmentContext envContext)
         throws TException {
-      rename_partition(dbName, tableName, null,
-          newPartition, envContext);
+      String[] parsedDbName = parseDbName(dbName, conf);
+      rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition,
+          envContext);
     }
 
     @Override
@@ -4292,14 +4622,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final List<String> part_vals, final Partition new_part)
         throws TException {
       // Call rename_partition without an environment context.
-      rename_partition(db_name, tbl_name, part_vals, new_part, null);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part,
+          null);
     }
 
-    private void rename_partition(final String db_name, final String tbl_name,
+    private void rename_partition(final String catName, final String db_name, final String tbl_name,
         final List<String> part_vals, final Partition new_part,
         final EnvironmentContext envContext)
         throws TException {
-      startTableFunction("alter_partition", db_name, tbl_name);
+      startTableFunction("alter_partition", catName, db_name, tbl_name);
 
       if (LOG.isInfoEnabled()) {
         LOG.info("New partition values:" + new_part.getValues());
@@ -4317,6 +4649,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
       }
 
+      // Make sure the new partition has the catalog value set
+      if (!new_part.isSetCatName()) new_part.setCatName(catName);
+
       Partition oldPart = null;
       Exception ex = null;
       try {
@@ -4326,14 +4661,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               partitionValidationPattern);
         }
 
-        oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part,
-                envContext, this);
+        oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name,
+            part_vals, new_part, envContext, this);
 
         // Only fetch the table if we actually have a listener
         Table table = null;
         if (!listeners.isEmpty()) {
           if (table == null) {
-            table = getMS().getTable(db_name, tbl_name);
+            table = getMS().getTable(catName, db_name, tbl_name);
           }
 
           MetaStoreListenerNotifier.notifyEvent(listeners,
@@ -4373,7 +4708,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final List<Partition> new_parts, EnvironmentContext environmentContext)
         throws TException {
 
-      startTableFunction("alter_partitions", db_name, tbl_name);
+      String[] parsedDbName = parseDbName(db_name, conf);
+      startTableFunction("alter_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
 
       if (LOG.isInfoEnabled()) {
         for (Partition tmpPart : new_parts) {
@@ -4386,10 +4722,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Exception ex = null;
       try {
         for (Partition tmpPart : new_parts) {
-          firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
+          // Make sure the catalog name is set in the new partition
+          if (!tmpPart.isSetCatName()) tmpPart.setCatName(getDefaultCatalog(conf));
+          firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this));
         }
-        oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts,
-                environmentContext, this);
+        oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME],
+            parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this);
         Iterator<Partition> olditr = oldParts.iterator();
         // Only fetch the table if we have a listener that needs it.
         Table table = null;
@@ -4403,7 +4741,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
 
           if (table == null) {
-            table = getMS().getTable(db_name, tbl_name);
+            table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
           }
 
           if (!listeners.isEmpty()) {
@@ -4443,7 +4781,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final Table newTable)
         throws InvalidOperationException, MetaException {
       // Do not set an environment context.
-      alter_table_core(dbname,name, newTable, null);
+      String[] parsedDbName = parseDbName(dbname, conf);
+      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, null);
     }
 
     @Override
@@ -4455,7 +4794,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         envContext = new EnvironmentContext();
         envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
       }
-      alter_table_core(dbname, name, newTable, envContext);
+      String[] parsedDbName = parseDbName(dbname, conf);
+      alter_table_core(parsedDbName[CAT_NAME], 

<TRUNCATED>