Posted to commits@doris.apache.org by mo...@apache.org on 2023/04/12 17:02:45 UTC

[doris] 01/33: [zhongjin] change hive and hadoop to tbds and remove dlf and glue

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch doris-for-zhongjin
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 7cf2c3fff2f986e5d1bd9916e165e0bbb9a75c2c
Author: morningman <mo...@163.com>
AuthorDate: Mon Apr 3 23:14:31 2023 +0800

    [zhongjin] change hive and hadoop to tbds and remove dlf and glue
---
 be/CMakeLists.txt                                  |    4 +-
 be/src/io/fs/hdfs.h                                |    2 +-
 fe/fe-core/pom.xml                                 |    4 +-
 .../metastore/hive2/ProxyMetaStoreClient.java      | 2193 ------------
 .../converters/BaseCatalogToHiveConverter.java     |  541 ---
 .../catalog/converters/CatalogToHiveConverter.java |   58 -
 .../converters/CatalogToHiveConverterFactory.java  |   54 -
 .../glue/catalog/converters/ConverterUtils.java    |   49 -
 .../catalog/converters/GlueInputConverter.java     |  116 -
 .../converters/Hive3CatalogToHiveConverter.java    |   70 -
 .../catalog/converters/HiveToCatalogConverter.java |  372 --
 .../catalog/converters/PartitionNameParser.java    |  143 -
 .../ConfigurationAWSCredentialsProvider.java       |   60 -
 ...ConfigurationAWSCredentialsProviderFactory.java |   29 -
 .../exceptions/InvalidPartitionNameException.java  |   33 -
 .../catalog/exceptions/LakeFormationException.java |   33 -
 .../metastore/AWSCatalogMetastoreClient.java       | 2481 --------------
 .../metastore/AWSCredentialsProviderFactory.java   |   31 -
 .../catalog/metastore/AWSGlueClientFactory.java    |  157 -
 .../catalog/metastore/AWSGlueDecoratorBase.java    | 1343 --------
 .../glue/catalog/metastore/AWSGlueMetastore.java   |  133 -
 .../metastore/AWSGlueMetastoreBaseDecorator.java   |  198 --
 .../metastore/AWSGlueMetastoreCacheDecorator.java  |  185 -
 .../catalog/metastore/AWSGlueMetastoreFactory.java |   47 -
 .../metastore/AWSGlueMultipleCatalogDecorator.java |  370 --
 .../DefaultAWSCredentialsProviderFactory.java      |   37 -
 .../catalog/metastore/DefaultAWSGlueMetastore.java |  662 ----
 .../metastore/DefaultExecutorServiceFactory.java   |   43 -
 .../catalog/metastore/ExecutorServiceFactory.java  |   33 -
 .../glue/catalog/metastore/GlueClientFactory.java  |   34 -
 .../metastore/GlueMetastoreClientDelegate.java     | 1843 ----------
 .../SessionCredentialsProviderFactory.java         |   56 -
 .../amazonaws/glue/catalog/util/AWSGlueConfig.java |   64 -
 .../catalog/util/BatchCreatePartitionsHelper.java  |  153 -
 .../catalog/util/BatchDeletePartitionsHelper.java  |  147 -
 .../glue/catalog/util/ExpressionHelper.java        |  242 --
 .../glue/catalog/util/HiveTableValidator.java      |   86 -
 .../amazonaws/glue/catalog/util/LoggingHelper.java |   57 -
 .../glue/catalog/util/MetastoreClientUtils.java    |  141 -
 .../amazonaws/glue/catalog/util/PartitionKey.java  |   60 -
 .../glue/catalog/util/PartitionUtils.java          |   57 -
 .../java/org/apache/doris/catalog/HMSResource.java |   32 -
 .../datasource/hive/PooledHiveMetaStoreClient.java |   16 +-
 .../hive/event/GzipJSONMessageDeserializer.java    |   51 -
 .../doris/datasource/hive/event/InsertEvent.java   |    5 +-
 .../hadoop/hive/metastore/HiveMetaStoreClient.java | 3562 --------------------
 fe/pom.xml                                         |   24 +-
 thirdparty/tbds_deps/include/tbds/Pipes.hh         |  260 ++
 thirdparty/tbds_deps/include/tbds/SerialUtils.hh   |  170 +
 thirdparty/tbds_deps/include/tbds/StringUtils.hh   |   81 +
 .../tbds_deps/include/tbds/TemplateFactory.hh      |   96 +
 thirdparty/tbds_deps/include/tbds/hdfs.h           |  938 ++++++
 thirdparty/tbds_deps/lib/libhdfs.a                 |  Bin 0 -> 98692 bytes
 53 files changed, 1572 insertions(+), 16084 deletions(-)

diff --git a/be/CMakeLists.txt b/be/CMakeLists.txt
index cc0b6bbdc1..9cf43325ae 100644
--- a/be/CMakeLists.txt
+++ b/be/CMakeLists.txt
@@ -626,6 +626,7 @@ include_directories(
     SYSTEM
     ${GENSRC_DIR}/
     ${THIRDPARTY_DIR}/include
+    ${THIRDPARTY_DIR}/../tbds_deps/include
     ${GPERFTOOLS_HOME}/include
 )
 include_directories($ENV{JAVA_HOME}/include)
@@ -766,7 +767,8 @@ set(COMMON_THIRDPARTY
 
 if (ARCH_AMD64 AND OS_LINUX)
     add_library(hadoop_hdfs STATIC IMPORTED)
-    set_target_properties(hadoop_hdfs PROPERTIES IMPORTED_LOCATION ${THIRDPARTY_DIR}/lib/hadoop_hdfs/native/libhdfs.a)
+    #set_target_properties(hadoop_hdfs PROPERTIES IMPORTED_LOCATION ${THIRDPARTY_DIR}/lib/hadoop_hdfs/native/libhdfs.a)
+    set_target_properties(hadoop_hdfs PROPERTIES IMPORTED_LOCATION ${THIRDPARTY_DIR}/../tbds_deps/lib/libhdfs.a)
 
     set(COMMON_THIRDPARTY
         ${COMMON_THIRDPARTY}
diff --git a/be/src/io/fs/hdfs.h b/be/src/io/fs/hdfs.h
index eb9e1b2c07..d802df1004 100644
--- a/be/src/io/fs/hdfs.h
+++ b/be/src/io/fs/hdfs.h
@@ -18,7 +18,7 @@
 #pragma once
 
 #ifdef USE_HADOOP_HDFS
-#include <hadoop_hdfs/hdfs.h>
+#include <tbds/hdfs.h>
 #else
 #include <hdfs/hdfs.h>
 #endif
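
(Aside, not part of the commit: the header swap above is transparent to BE call
sites because <tbds/hdfs.h> exposes the same libhdfs C API as <hadoop_hdfs/hdfs.h>;
only the backing static library changes, via the IMPORTED_LOCATION edit in
be/CMakeLists.txt earlier in this diff. A minimal sketch of a typical call site
follows -- the read_head helper and the "io/fs/hdfs.h" include root are
hypothetical, for illustration only.)

    #include <fcntl.h>          /* O_RDONLY */
    #include "io/fs/hdfs.h"     /* resolves to <tbds/hdfs.h> when USE_HADOOP_HDFS is defined */

    /* Read up to `len` bytes from the head of an HDFS file.
     * Hypothetical helper, not part of the Doris BE; uses only the
     * standard libhdfs C API, which is unchanged by this commit. */
    static int read_head(const char* path, char* buf, int len) {
        hdfsFS fs = hdfsConnect("default", 0);   /* namenode taken from loaded config */
        if (fs == NULL) {
            return -1;
        }
        hdfsFile f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        if (f == NULL) {
            hdfsDisconnect(fs);
            return -1;
        }
        tSize n = hdfsRead(fs, f, buf, (tSize) len);
        hdfsCloseFile(fs, f);
        hdfsDisconnect(fs);
        return (int) n;
    }

Because the API surface is identical, code like this recompiles against the TBDS
libhdfs.a without source changes.
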
diff --git a/fe/fe-core/pom.xml b/fe/fe-core/pom.xml
index 26c104597f..a0b6e6db9d 100644
--- a/fe/fe-core/pom.xml
+++ b/fe/fe-core/pom.xml
@@ -98,7 +98,7 @@ under the License.
             <dependency>
                 <groupId>org.apache.hadoop</groupId>
                 <artifactId>hadoop-mapreduce-client</artifactId>
-                <version>${hadoop.version}</version>
+                <version>${tbds.hadoop.version}</version>
                 <scope>compile</scope>
             </dependency>
         </dependencies>
@@ -623,7 +623,7 @@ under the License.
         <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-auth</artifactId>
-            <version>${hadoop.version}</version>
+            <version>${tbds.hadoop.version}</version>
             <scope>provided</scope>
         </dependency>
         <!-- https://mvnrepository.com/artifact/io.opentelemetry/opentelemetry-api -->
diff --git a/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java b/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java
deleted file mode 100644
index 8ac9f33ee6..0000000000
--- a/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java
+++ /dev/null
@@ -1,2193 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from:
-// https://github.com/aliyun/datalake-catalog-metastore-client/blob/master/metastore-client-hive/metastore-client-hive2/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java
-// 3c6f5905
-
-package com.aliyun.datalake.metastore.hive2;
-
-import com.aliyun.datalake.metastore.common.DataLakeConfig;
-import com.aliyun.datalake.metastore.common.ProxyMode;
-import com.aliyun.datalake.metastore.common.Version;
-import com.aliyun.datalake.metastore.common.functional.FunctionalUtils;
-import com.aliyun.datalake.metastore.common.functional.ThrowingConsumer;
-import com.aliyun.datalake.metastore.common.functional.ThrowingFunction;
-import com.aliyun.datalake.metastore.common.functional.ThrowingRunnable;
-import com.aliyun.datalake.metastore.common.util.DataLakeUtil;
-import com.aliyun.datalake.metastore.common.util.ProxyLogUtils;
-import com.aliyun.datalake.metastore.hive.common.utils.ClientUtils;
-import com.aliyun.datalake.metastore.hive.common.utils.ConfigUtils;
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
-import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
-import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.metastore.utils.ObjectPair;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class ProxyMetaStoreClient implements IMetaStoreClient {
-    private static final Logger logger = LoggerFactory.getLogger(ProxyMetaStoreClient.class);
-    private final static String HIVE_FACTORY_CLASS = "org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClientFactory";
-
-    private final ProxyMode proxyMode;
-
-    // Dlf Client
-    private IMetaStoreClient dlfSessionMetaStoreClient;
-
-    // Hive Client
-    private IMetaStoreClient hiveSessionMetaStoreClient;
-
-    // ReadWrite Client
-    private IMetaStoreClient readWriteClient;
-
-    // Extra Write Client
-    private Optional<IMetaStoreClient> extraClient;
-
-    // Allow failure
-    private boolean allowFailure = false;
-
-    // copy Hive conf
-    private HiveConf hiveConf;
-
-    private final String readWriteClientType;
-
-    public ProxyMetaStoreClient(HiveConf hiveConf) throws MetaException {
-        this(hiveConf, null, false);
-    }
-
-    // morningman: add this constructor to avoid NoSuchMethod exception
-    public ProxyMetaStoreClient(Configuration conf, HiveMetaHookLoader hiveMetaHookLoader, Boolean allowEmbedded)
-            throws MetaException {
-        this((HiveConf) conf, hiveMetaHookLoader, allowEmbedded);
-    }
-
-    public ProxyMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hiveMetaHookLoader, Boolean allowEmbedded)
-            throws MetaException {
-        long startTime = System.currentTimeMillis();
-        logger.info("ProxyMetaStoreClient start, datalake-metastore-client-version:{}",
-                Version.DATALAKE_METASTORE_CLIENT_VERSION);
-        this.hiveConf = new HiveConf(hiveConf);
-
-        proxyMode = ConfigUtils.getProxyMode(hiveConf);
-
-        // init logging if needed
-        ProxyLogUtils.initLogUtils(proxyMode, hiveConf.get(DataLakeConfig.CATALOG_PROXY_LOGSTORE,
-                        ConfigUtils.getUserId(hiveConf)), hiveConf.getBoolean(DataLakeConfig.CATALOG_ACTION_LOG_ENABLED,
-                        DataLakeConfig.DEFAULT_CATALOG_ACTION_LOG_ENABLED),
-                hiveConf.getBoolean(DataLakeConfig.CATALOG_LOG_ENABLED, DataLakeConfig.DEFAULT_CATALOG_LOG_ENABLED));
-
-        // init Dlf Client if any
-        createClient(true, () -> initDlfClient(hiveConf, hiveMetaHookLoader, allowEmbedded, new ConcurrentHashMap<>()));
-
-        // init Hive Client if any
-        createClient(false, () -> initHiveClient(hiveConf, hiveMetaHookLoader, allowEmbedded, new ConcurrentHashMap<>()));
-
-        // init extraClient
-        initClientByProxyMode();
-
-        readWriteClientType = this.readWriteClient instanceof DlfSessionMetaStoreClient ? "dlf" : "hive";
-
-        logger.info("ProxyMetaStoreClient end, cost:{}ms", System.currentTimeMillis() - startTime);
-    }
-
-    public static Map<String, org.apache.hadoop.hive.ql.metadata.Table> getTempTablesForDatabase(String dbName) {
-        return getTempTables().get(dbName);
-    }
-
-    public static Map<String, Map<String, org.apache.hadoop.hive.ql.metadata.Table>> getTempTables() {
-        SessionState ss = SessionState.get();
-        if (ss == null) {
-            return Collections.emptyMap();
-        }
-        return ss.getTempTables();
-    }
-
-    public HiveConf getHiveConf() {
-        return hiveConf;
-    }
-
-    public void initClientByProxyMode() throws MetaException {
-        switch (proxyMode) {
-            case METASTORE_ONLY:
-                this.readWriteClient = hiveSessionMetaStoreClient;
-                this.extraClient = Optional.empty();
-                break;
-            case METASTORE_DLF_FAILURE:
-                this.allowFailure = true;
-                this.readWriteClient = hiveSessionMetaStoreClient;
-                this.extraClient = Optional.ofNullable(dlfSessionMetaStoreClient);
-                break;
-            case METASTORE_DLF_SUCCESS:
-                this.readWriteClient = hiveSessionMetaStoreClient;
-                this.extraClient = Optional.of(dlfSessionMetaStoreClient);
-                break;
-            case DLF_METASTORE_SUCCESS:
-                this.readWriteClient = dlfSessionMetaStoreClient;
-                this.extraClient = Optional.of(hiveSessionMetaStoreClient);
-                break;
-            case DLF_METASTORE_FAILURE:
-                this.allowFailure = true;
-                this.readWriteClient = dlfSessionMetaStoreClient;
-                this.extraClient = Optional.ofNullable(hiveSessionMetaStoreClient);
-                break;
-            case DLF_ONLY:
-                this.readWriteClient = dlfSessionMetaStoreClient;
-                this.extraClient = Optional.empty();
-                break;
-            default:
-                throw new IllegalStateException("Unexpected value: " + proxyMode);
-        }
-    }
-
-    private void createClient(boolean isDlf, ThrowingRunnable<MetaException> createClient) throws MetaException {
-        try {
-            createClient.run();
-        } catch (Exception e) {
-            if ((isDlf && proxyMode == ProxyMode.METASTORE_DLF_FAILURE)) {
-                dlfSessionMetaStoreClient = null;
-            } else if (!isDlf && proxyMode == ProxyMode.DLF_METASTORE_FAILURE) {
-                hiveSessionMetaStoreClient = null;
-            } else {
-                throw DataLakeUtil.throwException(new MetaException(e.getMessage()), e);
-            }
-        }
-    }
-
-    public void initHiveClient(HiveConf hiveConf, HiveMetaHookLoader hiveMetaHookLoader, boolean allowEmbedded,
-            ConcurrentHashMap<String, Long> metaCallTimeMap) throws MetaException {
-        switch (proxyMode) {
-            case METASTORE_ONLY:
-            case METASTORE_DLF_FAILURE:
-            case METASTORE_DLF_SUCCESS:
-            case DLF_METASTORE_SUCCESS:
-            case DLF_METASTORE_FAILURE:
-                this.hiveSessionMetaStoreClient = ClientUtils.createMetaStoreClient(HIVE_FACTORY_CLASS,
-                        hiveConf, hiveMetaHookLoader, allowEmbedded, metaCallTimeMap);
-                break;
-            case DLF_ONLY:
-                break;
-            default:
-                throw new IllegalStateException("Unexpected value: " + proxyMode);
-        }
-    }
-
-    public void initDlfClient(HiveConf hiveConf, HiveMetaHookLoader hiveMetaHookLoader, boolean allowEmbedded,
-            ConcurrentHashMap<String, Long> metaCallTimeMap) throws MetaException {
-        switch (proxyMode) {
-            case METASTORE_ONLY:
-                break;
-            case METASTORE_DLF_FAILURE:
-            case METASTORE_DLF_SUCCESS:
-            case DLF_METASTORE_SUCCESS:
-            case DLF_METASTORE_FAILURE:
-            case DLF_ONLY:
-                this.dlfSessionMetaStoreClient = new DlfSessionMetaStoreClient(hiveConf, hiveMetaHookLoader, allowEmbedded);
-                break;
-            default:
-                throw new IllegalStateException("Unexpected value: " + proxyMode);
-        }
-    }
-
-    @Override
-    public boolean isCompatibleWith(Configuration conf) {
-        try {
-            return call(this.readWriteClient, client -> client.isCompatibleWith(conf), "isCompatibleWith", conf);
-        } catch (TException e) {
-            logger.error(e.getMessage(), e);
-        }
-        return false;
-    }
-
-    @Override
-    public void setHiveAddedJars(String s) {
-        try {
-            run(client -> client.setHiveAddedJars(s), "setHiveAddedJars", s);
-        } catch (TException e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public boolean isLocalMetaStore() {
-        return !extraClient.isPresent() && readWriteClient.isLocalMetaStore();
-    }
-
-    @Override
-    public void reconnect() throws MetaException {
-        if (hiveSessionMetaStoreClient != null) {
-            hiveSessionMetaStoreClient.reconnect();
-        }
-    }
-
-    @Override
-    public void close() {
-        if (hiveSessionMetaStoreClient != null) {
-            hiveSessionMetaStoreClient.close();
-        }
-    }
-
-    @Override
-    public void createDatabase(Database database)
-            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        run(client -> client.createDatabase(database), "createDatabase", database);
-    }
-
-    @Override
-    public Database getDatabase(String name) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getDatabase(name), "getDatabase", name);
-    }
-
-    @Override
-    public Database getDatabase(String catalogId, String databaseName) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getDatabase(catalogId, databaseName), "getDatabase", catalogId, databaseName);
-    }
-
-    @Override
-    public List<String> getDatabases(String pattern) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getDatabases(pattern), "getDatabases", pattern);
-    }
-
-    @Override
-    public List<String> getDatabases(String catalogId, String databasePattern) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getDatabases(catalogId, databasePattern), "getDatabases", catalogId, databasePattern);
-    }
-
-    @Override
-    public List<String> getAllDatabases() throws MetaException, TException {
-        return getDatabases(".*");
-    }
-
-    @Override
-    public List<String> getAllDatabases(String catalogId) throws MetaException, TException {
-        return getDatabases(catalogId);
-    }
-
-    @Override
-    public void alterDatabase(String databaseName, Database database)
-            throws NoSuchObjectException, MetaException, TException {
-        run(client -> client.alterDatabase(databaseName, database), "alterDatabase", databaseName, database);
-    }
-
-    @Override
-    public void alterDatabase(String catalogId, String databaseName, Database database) throws NoSuchObjectException, MetaException, TException {
-        run(client -> client.alterDatabase(catalogId, databaseName, database), "alterDatabase", catalogId, databaseName, database);
-    }
-
-    @Override
-    public void dropDatabase(String name)
-            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-        dropDatabase(name, true, false, false);
-    }
-
-    @Override
-    public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
-            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-        dropDatabase(name, deleteData, ignoreUnknownDb, false);
-    }
-
-    @Override
-    public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
-            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-        run(client -> client.dropDatabase(name, deleteData, ignoreUnknownDb, cascade), "dropDatabase", name, deleteData,
-                ignoreUnknownDb, cascade);
-    }
-
-    @Override
-    public void dropDatabase(String catalogId, String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-        run(client -> client.dropDatabase(catalogId, name, deleteData, ignoreUnknownDb, cascade), "dropDatabase", catalogId, name, deleteData, ignoreUnknownDb, cascade);
-    }
-
-    @Override
-    public Partition add_partition(Partition partition)
-            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.add_partition(partition), "add_partition", partition);
-    }
-
-    @Override
-    public int add_partitions(List<Partition> partitions)
-            throws InvalidObjectException, AlreadyExistsException, MetaException,
-            TException {
-        return call(client -> client.add_partitions(partitions), "add_partitions", partitions);
-    }
-
-    @Override
-    public List<Partition> add_partitions(
-            List<Partition> partitions,
-            boolean ifNotExists,
-            boolean needResult
-    ) throws TException {
-        return call(client -> client.add_partitions(partitions, ifNotExists, needResult), "add_partitions", partitions, ifNotExists, needResult);
-    }
-
-    @Override
-    public int add_partitions_pspec(PartitionSpecProxy pSpec)
-            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.add_partitions_pspec(pSpec), "add_partitions_pspec", pSpec);
-    }
-
-    @Override
-    public void alterFunction(String dbName, String functionName, Function newFunction)
-            throws InvalidObjectException, MetaException, TException {
-        run(client -> client.alterFunction(dbName, functionName, newFunction), "alterFunction", dbName, functionName, newFunction);
-    }
-
-    @Override
-    public void alterFunction(String catalogId, String dbName, String functionName, Function newFunction) throws InvalidObjectException, MetaException, TException {
-        run(client -> client.alterFunction(catalogId, dbName, functionName, newFunction), "alterFunction", catalogId, dbName, functionName, newFunction);
-    }
-
-    @Override
-    public void alter_partition(String dbName, String tblName, Partition partition)
-            throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partition(dbName, tblName, partition), "alter_partition", dbName, tblName, partition);
-    }
-
-    @Override
-    public void alter_partition(
-            String dbName,
-            String tblName,
-            Partition partition,
-            EnvironmentContext environmentContext
-    ) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partition(dbName, tblName, partition, environmentContext), "alter_partition", dbName, tblName, partition, environmentContext);
-    }
-
-    @Override
-    public void alter_partition(String catalogId, String dbName, String tblName, Partition partition, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partition(catalogId, dbName, tblName, partition, environmentContext), "alter_partition", catalogId, dbName, tblName, partition, environmentContext);
-    }
-
-    @Override
-    public void alter_partitions(
-            String dbName,
-            String tblName,
-            List<Partition> partitions
-    ) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partitions(dbName, tblName, partitions), "alter_partitions", dbName, tblName, partitions);
-    }
-
-    @Override
-    public void alter_partitions(
-            String dbName,
-            String tblName,
-            List<Partition> partitions,
-            EnvironmentContext environmentContext
-    ) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partitions(dbName, tblName, partitions, environmentContext), "alter_partitions", dbName, tblName, partitions, environmentContext);
-    }
-
-    @Override
-    public void alter_partitions(String catalogId, String dbName, String tblName, List<Partition> partitions, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.alter_partitions(catalogId, dbName, tblName, partitions, environmentContext), "alter_partitions", catalogId, dbName, tblName, partitions, environmentContext);
-    }
-
-    @Override
-    public void alter_table(String dbName, String tblName, Table table)
-            throws InvalidOperationException, MetaException, TException {
-        if (table.isTemporary()) {
-            run(this.readWriteClient, client -> client.alter_table(dbName, tblName, table), "alter_table", dbName, tblName, table);
-        } else {
-            run(client -> client.alter_table(dbName, tblName, table), "alter_table", dbName, tblName, table);
-        }
-    }
-
-    @Override
-    public void alter_table(String catalogId, String dbName, String tblName, Table table, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException {
-        if (table.isTemporary()) {
-            run(this.readWriteClient, client -> client.alter_table(catalogId, dbName, tblName, table, environmentContext), "alter_table", catalogId, dbName, tblName, table, environmentContext);
-        } else {
-            run(client -> client.alter_table(catalogId, dbName, tblName, table, environmentContext), "alter_table", catalogId, dbName, tblName, table, environmentContext);
-        }
-    }
-
-    @Override
-    @Deprecated
-    public void alter_table(
-            String dbName,
-            String tblName,
-            Table table,
-            boolean cascade
-    ) throws InvalidOperationException, MetaException, TException {
-        if (table.isTemporary()) {
-            run(this.readWriteClient, client -> client.alter_table(dbName, tblName, table, cascade), "alter_table", dbName, tblName, table, cascade);
-        } else {
-            run(client -> client.alter_table(dbName, tblName, table, cascade), "alter_table", dbName, tblName, table, cascade);
-        }
-    }
-
-    @Override
-    public void alter_table_with_environmentContext(
-            String dbName,
-            String tblName,
-            Table table,
-            EnvironmentContext environmentContext
-    ) throws InvalidOperationException, MetaException, TException {
-        if (table.isTemporary()) {
-            run(this.readWriteClient, client -> client.alter_table_with_environmentContext(dbName, tblName, table, environmentContext), "alter_table_with_environmentContext", dbName, tblName, table, environmentContext);
-        } else {
-            run(client -> client.alter_table_with_environmentContext(dbName, tblName, table, environmentContext), "alter_table_with_environmentContext", dbName,
-                    tblName, table, environmentContext);
-        }
-    }
-
-    @Override
-    public Partition appendPartition(String dbName, String tblName, List<String> values)
-            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.appendPartition(dbName, tblName, values), "appendPartition", dbName, tblName, values);
-    }
-
-    @Override
-    public Partition appendPartition(String catalogId, String dbName, String tblName, List<String> values) throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.appendPartition(catalogId, dbName, tblName, values), "appendPartition", catalogId, dbName, tblName, values);
-    }
-
-    @Override
-    public Partition appendPartition(String dbName, String tblName, String partitionName)
-            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.appendPartition(dbName, tblName, partitionName), "appendPartition", dbName, tblName, partitionName);
-    }
-
-    @Override
-    public Partition appendPartition(String catalogId, String dbName, String tblName, String partitionName) throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-        return call(client -> client.appendPartition(catalogId, dbName, tblName, partitionName), "appendPartition", catalogId, dbName, tblName, partitionName);
-    }
-
-    @Override
-    public boolean create_role(Role role) throws MetaException, TException {
-        return call(client -> client.create_role(role), "create_role", role);
-    }
-
-    @Override
-    public boolean drop_role(String roleName) throws MetaException, TException {
-        return call(client -> client.drop_role(roleName), "drop_role", roleName);
-    }
-
-    @Override
-    public List<Role> list_roles(
-            String principalName, PrincipalType principalType
-    ) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.list_roles(principalName, principalType), "list_roles", principalName, principalType);
-    }
-
-    @Override
-    public List<String> listRoleNames() throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.listRoleNames(), "listRoleNames");
-    }
-
-    @Override
-    public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)
-            throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.get_principals_in_role(request), "get_principals_in_role", request);
-    }
-
-    @Override
-    public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
-            GetRoleGrantsForPrincipalRequest request) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.get_role_grants_for_principal(request), "get_role_grants_for_principal", request);
-    }
-
-    @Override
-    public boolean grant_role(
-            String roleName,
-            String userName,
-            PrincipalType principalType,
-            String grantor,
-            PrincipalType grantorType,
-            boolean grantOption
-    ) throws MetaException, TException {
-        return call(client -> client.grant_role(roleName, userName, principalType, grantor, grantorType, grantOption)
-                , "grant_role", roleName, userName, principalType, grantor, grantorType);
-    }
-
-    @Override
-    public boolean revoke_role(
-            String roleName,
-            String userName,
-            PrincipalType principalType,
-            boolean grantOption
-    ) throws MetaException, TException {
-        return call(client -> client.revoke_role(roleName, userName, principalType, grantOption), "revoke_role", roleName, userName,
-                principalType, grantOption);
-    }
-
-    @Override
-    public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
-        run(client -> client.cancelDelegationToken(tokenStrForm), "cancelDelegationToken", tokenStrForm);
-    }
-
-    @Override
-    public String getTokenStrForm() throws IOException {
-        try {
-            return call(this.readWriteClient, client -> {
-                try {
-                    return client.getTokenStrForm();
-                } catch (IOException e) {
-                    throw new TException(e.getMessage(), e);
-                }
-            }, "getTokenStrForm");
-        } catch (TException e) {
-            throw new IOException(e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
-        return call(client -> client.addToken(tokenIdentifier, delegationToken), "addToken", tokenIdentifier, delegationToken);
-    }
-
-    @Override
-    public boolean removeToken(String tokenIdentifier) throws TException {
-        return call(client -> client.removeToken(tokenIdentifier), "removeToken", tokenIdentifier);
-    }
-
-    @Override
-    public String getToken(String tokenIdentifier) throws TException {
-        return call(this.readWriteClient, client -> client.getToken(tokenIdentifier), "getToken", tokenIdentifier);
-    }
-
-    @Override
-    public List<String> getAllTokenIdentifiers() throws TException {
-        return call(this.readWriteClient, client -> client.getAllTokenIdentifiers(), "getAllTokenIdentifiers");
-    }
-
-    @Override
-    public int addMasterKey(String key) throws MetaException, TException {
-        return call(client -> client.addMasterKey(key), "addMasterKey", key);
-    }
-
-    @Override
-    public void updateMasterKey(Integer seqNo, String key)
-            throws NoSuchObjectException, MetaException, TException {
-        run(client -> client.updateMasterKey(seqNo, key), "updateMasterKey", key);
-    }
-
-    @Override
-    public boolean removeMasterKey(Integer keySeq) throws TException {
-        return call(client -> client.removeMasterKey(keySeq), "removeMasterKey", keySeq);
-    }
-
-    @Override
-    public String[] getMasterKeys() throws TException {
-        return call(this.readWriteClient, client -> client.getMasterKeys(), "getMasterKeys");
-    }
-
-    @Override
-    public LockResponse checkLock(long lockId)
-            throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException {
-        return call(this.readWriteClient, client -> client.checkLock(lockId), "checkLock", lockId);
-    }
-
-    @Override
-    public void commitTxn(long txnId) throws NoSuchTxnException, TxnAbortedException, TException {
-        run(client -> client.commitTxn(txnId), "commitTxn", txnId);
-    }
-
-    @Override
-    public void replCommitTxn(long srcTxnId, String replPolicy) throws NoSuchTxnException, TxnAbortedException, TException {
-        run(client -> client.replCommitTxn(srcTxnId, replPolicy), "replCommitTxn", srcTxnId, replPolicy);
-    }
-
-    @Override
-    public void abortTxns(List<Long> txnIds) throws TException {
-        run(client -> client.abortTxns(txnIds), "abortTxns", txnIds);
-    }
-
-    @Override
-    public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException {
-        return call(client -> client.allocateTableWriteId(txnId, dbName, tableName), "allocateTableWriteId", txnId, dbName, tableName);
-    }
-
-    @Override
-    public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List<String> partNames) throws TException {
-        run(client -> client.replTableWriteIdState(validWriteIdList, dbName, tableName, partNames), "replTableWriteIdState", validWriteIdList, dbName, tableName, partNames);
-    }
-
-    @Override
-    public List<TxnToWriteId> allocateTableWriteIdsBatch(List<Long> txnIds, String dbName, String tableName) throws TException {
-        return call(client -> client.allocateTableWriteIdsBatch(txnIds, dbName, tableName), "allocateTableWriteIdsBatch", txnIds, dbName, tableName);
-    }
-
-    @Override
-    public List<TxnToWriteId> replAllocateTableWriteIdsBatch(String dbName, String tableName,
-            String replPolicy, List<TxnToWriteId> srcTxnToWriteIdList) throws TException {
-        return call(client -> client.replAllocateTableWriteIdsBatch(dbName, tableName, replPolicy, srcTxnToWriteIdList), "replAllocateTableWriteIdsBatch", dbName, tableName, replPolicy, srcTxnToWriteIdList);
-    }
-
-    @Override
-    @Deprecated
-    public void compact(
-            String dbName,
-            String tblName,
-            String partitionName,
-            CompactionType compactionType
-    ) throws TException {
-        run(client -> client.compact(dbName, tblName, partitionName, compactionType), "compact", dbName, tblName, partitionName, compactionType);
-    }
-
-    @Override
-    @Deprecated
-    public void compact(
-            String dbName,
-            String tblName,
-            String partitionName,
-            CompactionType compactionType,
-            Map<String, String> tblProperties
-    ) throws TException {
-        run(client -> client.compact(dbName, tblName, partitionName, compactionType, tblProperties), "compact", dbName, tblName, partitionName, compactionType, tblProperties);
-    }
-
-    @Override
-    public CompactionResponse compact2(
-            String dbName,
-            String tblName,
-            String partitionName,
-            CompactionType compactionType,
-            Map<String, String> tblProperties
-    ) throws TException {
-        return call(client -> client.compact2(dbName, tblName, partitionName, compactionType, tblProperties), "compact2", dbName, tblName, partitionName, compactionType, tblProperties);
-    }
-
-    @Override
-    public void createFunction(Function function) throws InvalidObjectException, MetaException, TException {
-        run(client -> client.createFunction(function), "createFunction", function);
-    }
-
-    @Override
-    public void createTable(Table tbl)
-            throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException {
-        createTable(tbl, null);
-    }
-
-    public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
-            InvalidObjectException, MetaException, NoSuchObjectException, TException {
-        // Subclasses can override this step (for example, for temporary tables)
-        if (tbl.isTemporary()) {
-            run(this.readWriteClient, client -> client.createTable(tbl), "createTable", tbl);
-        } else {
-            run(client -> client.createTable(tbl), "createTable", tbl);
-        }
-    }
-
-    @Override
-    public boolean deletePartitionColumnStatistics(
-            String dbName, String tableName, String partName, String colName
-    ) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
-        return call(client -> client.deletePartitionColumnStatistics(dbName, tableName, partName, colName), "deletePartitionColumnStatistics", dbName,
-                tableName, partName, colName);
-    }
-
-    @Override
-    public boolean deletePartitionColumnStatistics(String catalogId, String dbName, String tableName, String partName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
-        return call(client -> client.deletePartitionColumnStatistics(catalogId, dbName, tableName, partName, colName), "deletePartitionColumnStatistics", catalogId, dbName, tableName, partName, colName);
-    }
-
-    @Override
-    public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
-            throws NoSuchObjectException, MetaException, InvalidObjectException,
-            TException, InvalidInputException {
-        if (getTempTable(dbName, tableName) != null) {
-            return call(this.readWriteClient, client -> client.deleteTableColumnStatistics(dbName, tableName, colName), "deleteTableColumnStatistics", dbName, tableName, colName);
-        } else {
-            return call(client -> client.deleteTableColumnStatistics(dbName, tableName, colName), "deleteTableColumnStatistics", dbName, tableName, colName);
-        }
-    }
-
-    @Override
-    public boolean deleteTableColumnStatistics(String catalogId, String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
-        if (getTempTable(dbName, tableName) != null) {
-            return call(this.readWriteClient, client -> client.deleteTableColumnStatistics(catalogId, dbName, tableName, colName), "deleteTableColumnStatistics", catalogId, dbName, tableName, colName);
-        } else {
-            return call(client -> client.deleteTableColumnStatistics(catalogId, dbName, tableName, colName), "deleteTableColumnStatistics", catalogId, dbName, tableName, colName);
-        }
-    }
-
-    @Override
-    public void dropFunction(String dbName, String functionName) throws MetaException, NoSuchObjectException,
-            InvalidObjectException, InvalidInputException, TException {
-        run(client -> client.dropFunction(dbName, functionName), "dropFunction", dbName, functionName);
-    }
-
-    @Override
-    public void dropFunction(String catalogId, String dbName, String functionName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, TException {
-        run(client -> client.dropFunction(catalogId, dbName, functionName), "dropFunction", catalogId, dbName, functionName);
-    }
-
-    @Override
-    public boolean dropPartition(String dbName, String tblName, List<String> values, boolean deleteData)
-            throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartition(dbName, tblName, values, deleteData), "dropPartition", dbName, tblName, values, deleteData);
-    }
-
-    @Override
-    public boolean dropPartition(String catalogId, String dbName, String tblName, List<String> values, boolean deleteData) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartition(catalogId, dbName, tblName, values, deleteData), "dropPartition", catalogId, dbName, tblName, values, deleteData);
-    }
-
-    @Override
-    public boolean dropPartition(String dbName, String tblName, List<String> values, PartitionDropOptions options)
-            throws TException {
-        return call(client -> client.dropPartition(dbName, tblName, values, options), "dropPartition", dbName, tblName, values, options);
-    }
-
-    @Override
-    public boolean dropPartition(String catalogId, String dbName, String tblName, List<String> values, PartitionDropOptions options) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartition(catalogId, dbName, tblName, values, options), "dropPartition", catalogId, dbName, tblName, values, options);
-    }
-
-    @Override
-    public List<Partition> dropPartitions(String dbName, String tblName,
-            List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
-            boolean ifExists) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartitions(dbName, tblName, partExprs, deleteData, ifExists), "dropPartitions", dbName, tblName, partExprs, deleteData, ifExists);
-    }
-
-    @Override
-    public List<Partition> dropPartitions(String dbName, String tblName,
-            List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
-            boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartitions(dbName, tblName, partExprs, deleteData, ifExists, needResult), "dropPartitions", dbName, tblName, partExprs, deleteData, ifExists, needResult);
-    }
-
-    @Override
-    public List<Partition> dropPartitions(String dbName, String tblName, List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions partitionDropOptions) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartitions(dbName, tblName, partExprs, partitionDropOptions), "dropPartitions", dbName, tblName, partExprs, partitionDropOptions);
-    }
-
-    @Override
-    public List<Partition> dropPartitions(String catalogId, String dbName, String tblName, List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions partitionDropOptions) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartitions(catalogId, dbName, tblName, partExprs, partitionDropOptions), "dropPartitions", catalogId, dbName, tblName, partExprs, partitionDropOptions);
-    }
-
-    @Override
-    public boolean dropPartition(String dbName, String tblName, String partitionName, boolean deleteData)
-            throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartition(dbName, tblName, partitionName, deleteData), "dropPartition", dbName, tblName,
-                partitionName, deleteData);
-    }
-
-    @Override
-    public boolean dropPartition(String catName, String dbName, String tblName, String partitionName,
-            boolean deleteData) throws NoSuchObjectException, MetaException, TException {
-        return call(client -> client.dropPartition(catName, dbName, tblName, partitionName, deleteData), "dropPartition", catName, dbName, tblName, partitionName, deleteData);
-    }
-
-    private Table getTempTable(String dbName, String tableName) {
-        Map<String, org.apache.hadoop.hive.ql.metadata.Table> tables = getTempTablesForDatabase(dbName.toLowerCase());
-        if (tables != null) {
-            org.apache.hadoop.hive.ql.metadata.Table table = tables.get(tableName.toLowerCase());
-            if (table != null) {
-                return table.getTTable();
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public void dropTable(String dbname, String tableName)
-            throws MetaException, TException, NoSuchObjectException {
-        Table table = getTempTable(dbname, tableName);
-        if (table != null) {
-            run(this.readWriteClient, client -> client.dropTable(dbname, tableName), "dropTable", dbname, tableName);
-        } else {
-            run(client -> client.dropTable(dbname, tableName), "dropTable", dbname, tableName);
-        }
-    }
-
-    @Override
-    public void dropTable(String catalogId, String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, NoSuchObjectException, TException {
-        Table table = getTempTable(dbname, tableName);
-        if (table != null) {
-            run(this.readWriteClient, client -> client.dropTable(catalogId, dbname, tableName, deleteData, ignoreUnknownTab, ifPurge), "dropTable", catalogId, dbname, tableName, deleteData, ignoreUnknownTab, ifPurge);
-        } else {
-            run(client -> client.dropTable(catalogId, dbname, tableName, deleteData, ignoreUnknownTab, ifPurge), "dropTable", catalogId, dbname, tableName, deleteData, ignoreUnknownTab, ifPurge);
-        }
-    }
-
-    @Override
-    public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
-        run(client -> client.truncateTable(dbName, tableName, partNames), "truncateTable", dbName, tableName, partNames);
-    }
-
-    @Override
-    public void truncateTable(String catName, String dbName, String tableName, List<String> partNames) throws MetaException, TException {
-        run(client -> client.truncateTable(catName, dbName, tableName, partNames), "truncateTable", catName, dbName, tableName, partNames);
-    }
-
-    @Override
-    public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest cmRecycleRequest) throws MetaException, TException {
-        return call(client -> client.recycleDirToCmPath(cmRecycleRequest), "recycleDirToCmPath", cmRecycleRequest);
-    }
-
-    @Override
-    public void dropTable(
-            String dbname,
-            String tableName,
-            boolean deleteData,
-            boolean ignoreUnknownTab
-    ) throws MetaException, TException, NoSuchObjectException {
-        Table table = getTempTable(dbname, tableName);
-        if (table != null) {
-            run(this.readWriteClient, client -> client.dropTable(dbname, tableName, deleteData, ignoreUnknownTab), "dropTable", dbname, tableName, deleteData, ignoreUnknownTab);
-        } else {
-            run(client -> client.dropTable(dbname, tableName, deleteData, ignoreUnknownTab), "dropTable", dbname, tableName, deleteData, ignoreUnknownTab);
-        }
-    }
-
-    @Override
-    public void dropTable(
-            String dbname,
-            String tableName,
-            boolean deleteData,
-            boolean ignoreUnknownTable,
-            boolean ifPurge
-    ) throws MetaException, TException, NoSuchObjectException {
-        Table table = getTempTable(dbname, tableName);
-        if (table != null) {
-            run(this.readWriteClient, client -> client.dropTable(dbname, tableName, deleteData, ignoreUnknownTable, ifPurge), "dropTable", dbname, tableName, deleteData, ignoreUnknownTable, ifPurge);
-        } else {
-            run(client -> client.dropTable(dbname, tableName, deleteData, ignoreUnknownTable, ifPurge), "dropTable", dbname, tableName, deleteData, ignoreUnknownTable, ifPurge);
-        }
-    }
-
-    @Override
-    public Partition exchange_partition(
-            Map<String, String> partitionSpecs,
-            String srcDb,
-            String srcTbl,
-            String dstDb,
-            String dstTbl
-    ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-        return call(client -> client.exchange_partition(partitionSpecs, srcDb, srcTbl, dstDb, dstTbl), "exchange_partition", partitionSpecs
-                , srcDb, srcTbl, dstDb, dstTbl);
-    }
-
-    @Override
-    public Partition exchange_partition(Map<String, String> partitionSpecs, String srcCatalogId, String srcDb, String srcTbl, String descCatalogId, String dstDb, String dstTbl) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-        return call(client -> client.exchange_partition(partitionSpecs, srcCatalogId, srcDb, srcTbl, descCatalogId, dstDb, dstTbl), "exchange_partition", partitionSpecs, srcCatalogId, srcDb, srcTbl, descCatalogId, dstDb, dstTbl);
-    }
-
-    @Override
-    public List<Partition> exchange_partitions(
-            Map<String, String> partitionSpecs,
-            String sourceDb,
-            String sourceTbl,
-            String destDb,
-            String destTbl
-    ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-        return call(client -> client.exchange_partitions(partitionSpecs, sourceDb, sourceTbl, destDb, destTbl), "exchange_partitions",
-                partitionSpecs, sourceDb, sourceTbl, destDb, destTbl);
-    }
-
-    @Override
-    public List<Partition> exchange_partitions(
-            Map<String, String> partitionSpecs,
-            String srcCatalogId,
-            String sourceDb,
-            String sourceTbl,
-            String dstCatalogId,
-            String destDb,
-            String destTbl
-    ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-        return call(client -> client.exchange_partitions(partitionSpecs, srcCatalogId, sourceDb, sourceTbl, dstCatalogId, destDb, destTbl), "exchange_partitions", partitionSpecs, srcCatalogId, sourceDb, sourceTbl, dstCatalogId, destDb, destTbl);
-    }
-
-    @Override
-    public AggrStats getAggrColStatsFor(
-            String dbName,
-            String tblName,
-            List<String> colNames,
-            List<String> partName
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getAggrColStatsFor(dbName, tblName, colNames, partName), "getAggrColStatsFor", dbName, tblName, colNames, partName);
-    }
-
-    @Override
-    public AggrStats getAggrColStatsFor(
-            String catalogId,
-            String dbName,
-            String tblName,
-            List<String> colNames,
-            List<String> partName
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getAggrColStatsFor(catalogId, dbName, tblName, colNames, partName), "getAggrColStatsFor", catalogId, dbName, tblName, colNames, partName);
-    }
-
-    @Override
-    public List<String> getAllTables(String dbname)
-            throws MetaException, TException, UnknownDBException {
-        return getTables(dbname, ".*");
-    }
-
-    @Override
-    public List<String> getAllTables(String catalogId, String dbName) throws MetaException, TException, UnknownDBException {
-        return getTables(catalogId, dbName, ".*");
-    }
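Both getAllTables overloads reduce to the pattern-based getTables calls using Hive's match-everything pattern ".*". A hedged usage sketch, where client stands for a hypothetical instance of this class and "hive"/"default" are placeholder names:

    List<String> allTables = client.getAllTables("default");            // same as getTables("default", ".*")
    List<String> allInCatalog = client.getAllTables("hive", "default"); // same as getTables("hive", "default", ".*")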
-
-    @Override
-    public String getConfigValue(String name, String defaultValue)
-            throws TException, ConfigValSecurityException {
-        return call(this.readWriteClient, client -> client.getConfigValue(name, defaultValue), "getConfigValue", name, defaultValue);
-    }
-
-    @Override
-    public String getDelegationToken(String owner, String renewerKerberosPrincipalName)
-            throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getDelegationToken(owner, renewerKerberosPrincipalName), "getDelegationToken", owner, renewerKerberosPrincipalName);
-    }
-
-    @Override
-    public List<FieldSchema> getFields(String db, String tableName) throws TException {
-        return call(this.readWriteClient, client -> client.getFields(db, tableName), "getFields", db, tableName);
-    }
-
-    @Override
-    public List<FieldSchema> getFields(String catalogId, String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getFields(catalogId, db, tableName), "getFields", catalogId, db, tableName);
-    }
-
-    @Override
-    public Function getFunction(String dbName, String functionName) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getFunction(dbName, functionName), "getFunction", dbName, functionName);
-    }
-
-    @Override
-    public Function getFunction(String catalogId, String dbName, String funcName) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getFunction(catalogId, dbName, funcName), "getFunction", catalogId, dbName, funcName);
-    }
-
-    @Override
-    public List<String> getFunctions(String dbName, String pattern) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getFunctions(dbName, pattern), "getFunctions", dbName, pattern);
-    }
-
-    @Override
-    public List<String> getFunctions(String catalogId, String dbName, String pattern) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getFunctions(catalogId, dbName, pattern), "getFunctions", catalogId, dbName, pattern);
-    }
-
-    @Override
-    public GetAllFunctionsResponse getAllFunctions() throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getAllFunctions(), "getAllFunctions");
-    }
-
-    @Override
-    public String getMetaConf(String key) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getMetaConf(key), "getMetaConf", key);
-    }
-
-    @Override
-    public void createCatalog(Catalog catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
-        run(client -> client.createCatalog(catalog), "createCatalog", catalog);
-    }
-
-    @Override
-    public void alterCatalog(String catalogName, Catalog newCatalog) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        run(client -> client.alterCatalog(catalogName, newCatalog), "alterCatalog", catalogName, newCatalog);
-    }
-
-    @Override
-    public Catalog getCatalog(String catalogId) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getCatalog(catalogId), "getCatalog", catalogId);
-    }
-
-    @Override
-    public List<String> getCatalogs() throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getCatalogs(), "getCatalogs");
-    }
-
-    @Override
-    public void dropCatalog(String catalogId) throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-        run(client -> client.dropCatalog(catalogId), "dropCatalog", catalogId);
-    }
-
-    @Override
-    public Partition getPartition(String dbName, String tblName, List<String> values)
-            throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartition(dbName, tblName, values), "getPartition", dbName, tblName, values);
-    }
-
-    @Override
-    public Partition getPartition(String catalogId, String dbName, String tblName, List<String> values) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartition(catalogId, dbName, tblName, values), "getPartition", catalogId, dbName, tblName, values);
-    }
-
-    @Override
-    public Partition getPartition(String dbName, String tblName, String partitionName)
-            throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getPartition(dbName, tblName, partitionName), "getPartition", dbName, tblName, partitionName);
-    }
-
-    @Override
-    public Partition getPartition(String catalogId, String dbName, String tblName, String partitionName) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getPartition(catalogId, dbName, tblName, partitionName), "getPartition", catalogId, dbName, tblName, partitionName);
-    }
-
-    @Override
-    public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> partitionNames,
-            List<String> columnNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionColumnStatistics(dbName, tableName, partitionNames, columnNames), "getPartitionColumnStatistics", dbName, tableName, partitionNames, columnNames);
-    }
-
-    @Override
-    public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
-            String catalogId,
-            String dbName,
-            String tableName,
-            List<String> partitionNames,
-            List<String> columnNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionColumnStatistics(catalogId, dbName, tableName, partitionNames, columnNames), "getPartitionColumnStatistics", catalogId, dbName, tableName, partitionNames, columnNames);
-    }
-
-    @Override
-    public Partition getPartitionWithAuthInfo(
-            String databaseName,
-            String tableName,
-            List<String> values,
-            String userName,
-            List<String> groupNames
-    ) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionWithAuthInfo(databaseName, tableName, values, userName, groupNames), "getPartitionWithAuthInfo", databaseName, tableName, values, userName, groupNames);
-    }
-
-    @Override
-    public Partition getPartitionWithAuthInfo(
-            String catalogId,
-            String databaseName,
-            String tableName,
-            List<String> values,
-            String userName,
-            List<String> groupNames) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionWithAuthInfo(catalogId, databaseName, tableName, values, userName, groupNames), "getPartitionWithAuthInfo", catalogId, databaseName, tableName, values, userName, groupNames);
-    }
-
-    @Override
-    public List<Partition> getPartitionsByNames(
-            String databaseName,
-            String tableName,
-            List<String> partitionNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionsByNames(databaseName, tableName, partitionNames), "getPartitionsByNames", databaseName, tableName, partitionNames);
-    }
-
-    @Override
-    public List<Partition> getPartitionsByNames(
-            String catalogId,
-            String databaseName,
-            String tableName,
-            List<String> partitionNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getPartitionsByNames(catalogId, databaseName, tableName, partitionNames), "getPartitionsByNames", catalogId, databaseName, tableName, partitionNames);
-    }
-
-    @Override
-    public List<FieldSchema> getSchema(String db, String tableName) throws TException {
-        return call(this.readWriteClient, client -> client.getSchema(db, tableName), "getSchema", db, tableName);
-    }
-
-    @Override
-    public List<FieldSchema> getSchema(String catalogId, String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getSchema(catalogId, db, tableName), "getSchema", catalogId, db, tableName);
-    }
-
-    @Override
-    public Table getTable(String dbName, String tableName)
-            throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.getTable(dbName, tableName), "getTable", dbName, tableName);
-    }
-
-    @Override
-    public Table getTable(String catalogId, String dbName, String tableName) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getTable(catalogId, dbName, tableName), "getTable", catalogId, dbName, tableName);
-    }
-
-    @Override
-    public List<ColumnStatisticsObj> getTableColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> colNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getTableColumnStatistics(dbName, tableName, colNames), "getTableColumnStatistics", dbName, tableName, colNames);
-    }
-
-    @Override
-    public List<ColumnStatisticsObj> getTableColumnStatistics(
-            String catalogId,
-            String dbName,
-            String tableName,
-            List<String> colNames
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getTableColumnStatistics(catalogId, dbName, tableName, colNames), "getTableColumnStatistics", catalogId, dbName, tableName, colNames);
-    }
-
-    @Override
-    public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
-            throws MetaException, InvalidOperationException, UnknownDBException, TException {
-        return call(this.readWriteClient, client -> client.getTableObjectsByName(dbName, tableNames), "getTableObjectsByName", dbName, tableNames);
-    }
-
-    @Override
-    public List<Table> getTableObjectsByName(String catalogId, String dbName, List<String> tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException {
-        return call(this.readWriteClient, client -> client.getTableObjectsByName(catalogId, dbName, tableNames), "getTableObjectsByName", catalogId, dbName, tableNames);
-    }
-
-    @Override
-    public Materialization getMaterializationInvalidationInfo(CreationMetadata creationMetadata, String validTxnList) throws MetaException, InvalidOperationException, UnknownDBException, TException {
-        return call(this.readWriteClient, client -> client.getMaterializationInvalidationInfo(creationMetadata, validTxnList), "getMaterializationInvalidationInfo", creationMetadata, validTxnList);
-    }
-
-    @Override
-    public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws MetaException, TException {
-        run(client -> client.updateCreationMetadata(dbName, tableName, cm), "updateCreationMetadata", dbName, tableName, cm);
-    }
-
-    @Override
-    public void updateCreationMetadata(String catalogId, String dbName, String tableName, CreationMetadata cm) throws MetaException, TException {
-        run(client -> client.updateCreationMetadata(catalogId, dbName, tableName, cm), "updateCreationMetadata", catalogId, dbName, tableName, cm);
-    }
-
-    @Override
-    public List<String> getTables(String dbname, String tablePattern)
-            throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getTables(dbname, tablePattern), "getTables", dbname, tablePattern);
-    }
-
-    @Override
-    public List<String> getTables(String catalogId, String dbname, String tablePattern) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getTables(catalogId, dbname, tablePattern), "getTables", catalogId, dbname, tablePattern);
-    }
-
-    @Override
-    public List<String> getTables(String dbname, String tablePattern, TableType tableType)
-            throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getTables(dbname, tablePattern, tableType), "getTables", dbname, tablePattern, tableType);
-    }
-
-    @Override
-    public List<String> getTables(String catalogId, String dbname, String tablePattern, TableType tableType) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getTables(catalogId, dbname, tablePattern, tableType), "getTables", catalogId, dbname, tablePattern, tableType);
-    }
-
-    @Override
-    public List<String> getMaterializedViewsForRewriting(String dbName) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getMaterializedViewsForRewriting(dbName), "getMaterializedViewsForRewriting", dbName);
-    }
-
-    @Override
-    public List<String> getMaterializedViewsForRewriting(String catalogId, String dbName) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getMaterializedViewsForRewriting(catalogId, dbName), "getMaterializedViewsForRewriting", catalogId, dbName);
-    }
-
-    @Override
-    public List<TableMeta> getTableMeta(
-            String dbPatterns,
-            String tablePatterns,
-            List<String> tableTypes
-    ) throws MetaException, TException, UnknownDBException, UnsupportedOperationException {
-        return call(this.readWriteClient, client -> client.getTableMeta(dbPatterns, tablePatterns, tableTypes), "getTableMeta", dbPatterns, tablePatterns, tableTypes);
-    }
-
-    @Override
-    public List<TableMeta> getTableMeta(String catalogId, String dbPatterns, String tablePatterns, List<String> tableTypes) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.getTableMeta(catalogId, dbPatterns, tablePatterns, tableTypes), "getTableMeta", catalogId, dbPatterns, tablePatterns, tableTypes);
-    }
-
-    @Override
-    public ValidTxnList getValidTxns() throws TException {
-        return call(this.readWriteClient, client -> client.getValidTxns(), "getValidTxns");
-    }
-
-    @Override
-    public ValidTxnList getValidTxns(long currentTxn) throws TException {
-        return call(this.readWriteClient, client -> client.getValidTxns(currentTxn), "getValidTxns", currentTxn);
-    }
-
-    @Override
-    public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException {
-        return call(this.readWriteClient, client -> client.getValidWriteIds(fullTableName), "getValidWriteIds", fullTableName);
-    }
-
-    @Override
-    public List<TableValidWriteIds> getValidWriteIds(List<String> tablesList, String validTxnList) throws TException {
-        return call(this.readWriteClient, client -> client.getValidWriteIds(tablesList, validTxnList), "getValidWriteIds", tablesList, validTxnList);
-    }
-
-    @Override
-    public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef obj, String user, List<String> groups)
-            throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.get_privilege_set(obj, user, groups), "get_privilege_set", obj, user, groups);
-    }
-
-    @Override
-    public boolean grant_privileges(PrivilegeBag privileges)
-            throws MetaException, TException {
-        return call(client -> client.grant_privileges(privileges), "grant_privileges", privileges);
-    }
-
-    @Override
-    public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption)
-            throws MetaException, TException {
-        return call(client -> client.revoke_privileges(privileges, grantOption), "revoke_privileges", privileges, grantOption);
-    }
-
-    @Override
-    public boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) throws MetaException, TException {
-        return call(client -> client.refresh_privileges(objToRefresh, authorizer, grantPrivileges), "refresh_privileges", objToRefresh, authorizer, grantPrivileges);
-    }
-
-    @Override
-    public void heartbeat(long txnId, long lockId)
-            throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException {
-        run(client -> client.heartbeat(txnId, lockId), "heartbeat", txnId, lockId);
-    }
-
-    @Override
-    public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException {
-        return call(client -> client.heartbeatTxnRange(min, max), "heartbeatTxnRange", min, max);
-    }
-
-    @Override
-    public boolean isPartitionMarkedForEvent(
-            String dbName,
-            String tblName,
-            Map<String, String> partKVs,
-            PartitionEventType eventType
-    ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
-            UnknownPartitionException, InvalidPartitionException {
-        return call(this.readWriteClient, client -> client.isPartitionMarkedForEvent(dbName, tblName, partKVs, eventType), "isPartitionMarkedForEvent", dbName, tblName, partKVs, eventType);
-    }
-
-    @Override
-    public boolean isPartitionMarkedForEvent(
-            String catalogId,
-            String db_name,
-            String tbl_name,
-            Map<String, String> partKVs,
-            PartitionEventType eventType
-    ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
-            UnknownPartitionException, InvalidPartitionException {
-        return call(this.readWriteClient, client -> client.isPartitionMarkedForEvent(catalogId, db_name, tbl_name, partKVs, eventType), "isPartitionMarkedForEvent", catalogId, db_name, tbl_name, partKVs, eventType);
-    }
-
-    @Override
-    public List<String> listPartitionNames(String dbName, String tblName, short max)
-            throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionNames(dbName, tblName, max), "listPartitionNames", dbName, tblName, max);
-    }
-
-    @Override
-    public List<String> listPartitionNames(String catalogId, String dbName, String tblName, int max) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionNames(catalogId, dbName, tblName, max), "listPartitionNames", catalogId, dbName, tblName, max);
-    }
-
-    @Override
-    public List<String> listPartitionNames(
-            String databaseName,
-            String tableName,
-            List<String> values,
-            short max
-    ) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionNames(databaseName, tableName, values, max), "listPartitionNames", databaseName, tableName, values, max);
-    }
-
-    @Override
-    public List<String> listPartitionNames(
-            String catalogId,
-            String databaseName,
-            String tableName,
-            List<String> values,
-            int max
-    ) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionNames(catalogId, databaseName, tableName, values, max), "listPartitionNames", catalogId, databaseName, tableName, values, max);
-    }
-
-    @Override
-    public PartitionValuesResponse listPartitionValues(PartitionValuesRequest partitionValuesRequest) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionValues(partitionValuesRequest), "listPartitionValues", partitionValuesRequest);
-    }
-
-    @Override
-    public int getNumPartitionsByFilter(
-            String dbName,
-            String tableName,
-            String filter
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getNumPartitionsByFilter(dbName, tableName, filter), "getNumPartitionsByFilter", dbName, tableName, filter);
-    }
-
-    @Override
-    public int getNumPartitionsByFilter(
-            String catalogId,
-            String dbName,
-            String tableName,
-            String filter
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getNumPartitionsByFilter(catalogId, dbName, tableName, filter), "getNumPartitionsByFilter", catalogId, dbName, tableName, filter);
-    }
-
-    @Override
-    public PartitionSpecProxy listPartitionSpecs(
-            String dbName,
-            String tblName,
-            int max
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.listPartitionSpecs(dbName, tblName, max), "listPartitionSpecs", dbName, tblName, max);
-    }
-
-    @Override
-    public PartitionSpecProxy listPartitionSpecs(
-            String catalogId,
-            String dbName,
-            String tblName,
-            int max
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.listPartitionSpecs(catalogId, dbName, tblName, max), "listPartitionSpecs", catalogId, dbName, tblName, max);
-    }
-
-    @Override
-    public PartitionSpecProxy listPartitionSpecsByFilter(
-            String dbName,
-            String tblName,
-            String filter,
-            int max
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionSpecsByFilter(dbName, tblName, filter, max), "listPartitionSpecsByFilter", dbName, tblName, filter, max);
-    }
-
-    @Override
-    public PartitionSpecProxy listPartitionSpecsByFilter(
-            String catalogId,
-            String dbName,
-            String tblName,
-            String filter,
-            int max
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionSpecsByFilter(catalogId, dbName, tblName, filter, max), "listPartitionSpecsByFilter", catalogId, dbName, tblName, filter, max);
-    }
-
-    @Override
-    public List<Partition> listPartitions(String dbName, String tblName, short max)
-            throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitions(dbName, tblName, max), "listPartitions", dbName, tblName, max);
-    }
-
-    @Override
-    public List<Partition> listPartitions(String catalogId, String dbName, String tblName, int max) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitions(catalogId, dbName, tblName, max), "listPartitions", catalogId, dbName, tblName, max);
-    }
-
-    @Override
-    public List<Partition> listPartitions(
-            String databaseName,
-            String tableName,
-            List<String> values,
-            short max
-    ) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitions(databaseName, tableName, values, max), "listPartitions", databaseName, tableName, values, max);
-    }
-
-    @Override
-    public List<Partition> listPartitions(String catalogId, String databaseName, String tableName, List<String> values, int max) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.listPartitions(catalogId, databaseName, tableName, values, max), "listPartitions", catalogId, databaseName, tableName, values, max);
-    }
-
-    @Override
-    public boolean listPartitionsByExpr(
-            String databaseName,
-            String tableName,
-            byte[] expr,
-            String defaultPartitionName,
-            short max,
-            List<Partition> result
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.listPartitionsByExpr(databaseName, tableName, expr, defaultPartitionName, max, result), "listPartitionsByExpr", databaseName, tableName, expr, defaultPartitionName, max, result);
-    }
-
-    @Override
-    public boolean listPartitionsByExpr(
-            String catalogId,
-            String databaseName,
-            String tableName,
-            byte[] expr,
-            String defaultPartitionName,
-            int max,
-            List<Partition> result
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.listPartitionsByExpr(catalogId, databaseName, tableName, expr, defaultPartitionName, max, result), "listPartitionsByExpr", catalogId, databaseName, tableName, expr, defaultPartitionName, max, result);
-    }
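Unlike the filter-string variants, listPartitionsByExpr takes an already-serialized partition expression and fills a caller-supplied list, returning whether the server had to include partitions it could not definitively match. A hedged usage sketch; exprBytes stands for an expression serialized the way the metastore expects (Hive normally produces it through its PartitionExpressionProxy), and the database and table names are placeholders:

    List<Partition> parts = new ArrayList<>();
    boolean hasUnknown = client.listPartitionsByExpr(
            "sales_db", "orders", exprBytes,
            "__HIVE_DEFAULT_PARTITION__", (short) -1, parts); // -1 = no limit
    // parts is populated in place; hasUnknown signals that some returned
    // partitions may not actually satisfy the expression and need a
    // client-side re-check.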
-
-    @Override
-    public List<Partition> listPartitionsByFilter(
-            String databaseName,
-            String tableName,
-            String filter,
-            short max
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionsByFilter(databaseName, tableName, filter, max), "listPartitionsByFilter", databaseName, tableName, filter, max);
-    }
-
-    @Override
-    public List<Partition> listPartitionsByFilter(
-            String catalogId,
-            String databaseName,
-            String tableName,
-            String filter,
-            int max
-    ) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.listPartitionsByFilter(catalogId, databaseName, tableName, filter, max), "listPartitionsByFilter", catalogId, databaseName, tableName, filter, max);
-    }
-
-    @Override
-    public List<Partition> listPartitionsWithAuthInfo(
-            String database,
-            String table,
-            short maxParts,
-            String user,
-            List<String> groups
-    ) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionsWithAuthInfo(database, table, maxParts, user, groups), "listPartitionsWithAuthInfo", database, table, maxParts, user, groups);
-    }
-
-    @Override
-    public List<Partition> listPartitionsWithAuthInfo(
-            String catalogId,
-            String database,
-            String table,
-            int maxParts,
-            String user,
-            List<String> groups
-    ) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionsWithAuthInfo(catalogId, database, table, maxParts, user, groups), "listPartitionsWithAuthInfo", catalogId, database, table, maxParts, user, groups);
-    }
-
-    @Override
-    public List<Partition> listPartitionsWithAuthInfo(
-            String database,
-            String table,
-            List<String> partVals,
-            short maxParts,
-            String user,
-            List<String> groups
-    ) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionsWithAuthInfo(database, table, partVals, maxParts, user, groups), "listPartitionsWithAuthInfo", database, table, partVals, maxParts, user, groups);
-    }
-
-    @Override
-    public List<Partition> listPartitionsWithAuthInfo(String catalogId, String database, String table, List<String> partVals, int maxParts, String user, List<String> groups) throws MetaException, TException, NoSuchObjectException {
-        return call(this.readWriteClient, client -> client.listPartitionsWithAuthInfo(catalogId, database, table, partVals, maxParts, user, groups), "listPartitionsWithAuthInfo", catalogId, database, table, partVals, maxParts, user, groups);
-    }
-
-    @Override
-    public List<String> listTableNamesByFilter(
-            String dbName,
-            String filter,
-            short maxTables
-    ) throws MetaException, TException, InvalidOperationException, UnknownDBException, UnsupportedOperationException {
-        return call(this.readWriteClient, client -> client.listTableNamesByFilter(dbName, filter, maxTables), "listTableNamesByFilter", dbName, filter, maxTables);
-    }
-
-    @Override
-    public List<String> listTableNamesByFilter(String catalogId, String dbName, String filter, int maxTables) throws TException, InvalidOperationException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.listTableNamesByFilter(catalogId, dbName, filter, maxTables), "listTableNamesByFilter", catalogId, dbName, filter, maxTables);
-    }
-
-    @Override
-    public List<HiveObjectPrivilege> list_privileges(
-            String principal,
-            PrincipalType principalType,
-            HiveObjectRef objectRef
-    ) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.list_privileges(principal, principalType, objectRef), "list_privileges", principal, principalType, objectRef);
-    }
-
-    @Override
-    public LockResponse lock(LockRequest lockRequest) throws NoSuchTxnException, TxnAbortedException, TException {
-        return call(client -> client.lock(lockRequest), "lock", lockRequest);
-    }
-
-    @Override
-    public void markPartitionForEvent(
-            String dbName,
-            String tblName,
-            Map<String, String> partKVs,
-            PartitionEventType eventType
-    ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
-            UnknownPartitionException, InvalidPartitionException {
-        run(client -> client.markPartitionForEvent(dbName, tblName, partKVs, eventType), "markPartitionForEvent", dbName, tblName, partKVs, eventType);
-    }
-
-    @Override
-    public void markPartitionForEvent(
-            String catalogId,
-            String dbName,
-            String tblName,
-            Map<String, String> partKVs,
-            PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException {
-        run(client -> client.markPartitionForEvent(catalogId, dbName, tblName, partKVs, eventType), "markPartitionForEvent", catalogId, dbName, tblName, partKVs, eventType);
-    }
-
-    @Override
-    public long openTxn(String user) throws TException {
-        return call(client -> client.openTxn(user), "openTxn", user);
-    }
-
-    @Override
-    public List<Long> replOpenTxn(String replPolicy, List<Long> srcTxnIds, String user) throws TException {
-        return call(client -> client.replOpenTxn(replPolicy, srcTxnIds, user), "replOpenTxn", replPolicy, srcTxnIds, user);
-    }
-
-    @Override
-    public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
-        return call(client -> client.openTxns(user, numTxns), "openTxns", user, numTxns);
-    }
-
-    @Override
-    public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.partitionNameToSpec(name), "partitionNameToSpec", name);
-    }
-
-    @Override
-    public List<String> partitionNameToVals(String name) throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.partitionNameToVals(name), "partitionNameToVals", name);
-    }
-
-    @Override
-    public void renamePartition(
-            String dbName,
-            String tblName,
-            List<String> partitionValues,
-            Partition newPartition
-    ) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.renamePartition(dbName, tblName, partitionValues, newPartition), "renamePartition", dbName, tblName,
-                partitionValues, newPartition);
-    }
-
-    @Override
-    public void renamePartition(String catalogId, String dbName, String tblName, List<String> partitionValues, Partition newPartition) throws InvalidOperationException, MetaException, TException {
-        run(client -> client.renamePartition(catalogId, dbName, tblName, partitionValues, newPartition), "renamePartition", catalogId, dbName, tblName, partitionValues, newPartition);
-    }
-
-    @Override
-    public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
-        return call(client -> client.renewDelegationToken(tokenStrForm), "renewDelegationToken", tokenStrForm);
-    }
-
-    @Override
-    public void rollbackTxn(long txnId) throws NoSuchTxnException, TException {
-        run(client -> client.rollbackTxn(txnId), "rollbackTxn", txnId);
-    }
-
-    @Override
-    public void replRollbackTxn(long srcTxnId, String replPolicy) throws NoSuchTxnException, TException {
-        run(client -> client.replRollbackTxn(srcTxnId, replPolicy), "replRollbackTxn", srcTxnId, replPolicy);
-    }
-
-    @Override
-    public void setMetaConf(String key, String value) throws MetaException, TException {
-        run(client -> client.setMetaConf(key, value), "setMetaConf", key, value);
-    }
-
-    @Override
-    public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
-            throws NoSuchObjectException, InvalidObjectException,
-            MetaException, TException, InvalidInputException {
-        if (request.getColStatsSize() == 1) {
-            ColumnStatistics colStats = request.getColStatsIterator().next();
-            ColumnStatisticsDesc desc = colStats.getStatsDesc();
-            String dbName = desc.getDbName().toLowerCase();
-            String tableName = desc.getTableName().toLowerCase();
-            if (getTempTable(dbName, tableName) != null) {
-                // Temp tables are session-local, so only the session client can see them.
-                return call(this.readWriteClient, client -> client.setPartitionColumnStatistics(request), "setPartitionColumnStatistics", request);
-            }
-        }
-        // The primary client gets a deep copy, leaving the original request
-        // intact for the extra client below.
-        SetPartitionsStatsRequest deepCopy = request.deepCopy();
-        boolean result = readWriteClient.setPartitionColumnStatistics(deepCopy);
-        if (extraClient.isPresent()) {
-            try {
-                extraClient.get().setPartitionColumnStatistics(request);
-            } catch (Exception e) {
-                FunctionalUtils.collectLogs(e, "setPartitionColumnStatistics", request);
-                // Replay failures only propagate in strict mode.
-                if (!allowFailure) {
-                    throw e;
-                }
-            }
-        }
-        return result;
-    }
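setPartitionColumnStatistics is the one write in this class that spells out the dual-client contract inline instead of delegating to run/call. Distilled, and hedged because FunctionalUtils is not shown in this hunk and ThrowingFunction's single method is assumed here to be apply(...), the contract looks like this (dualWrite is a hypothetical helper name):

    // Primary write must succeed; the extra client is a best-effort replay
    // whose failures only propagate when allowFailure is false.
    <T> T dualWrite(ThrowingFunction<IMetaStoreClient, T, TException> op) throws TException {
        T result = op.apply(readWriteClient);
        if (extraClient.isPresent()) {
            try {
                op.apply(extraClient.get());
            } catch (TException e) {
                // the real code records the failure via FunctionalUtils.collectLogs(...)
                if (!allowFailure) {
                    throw e;
                }
            }
        }
        return result;
    }

The deep copy handed to the primary client keeps the original request intact for the replay.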
-
-    @Override
-    public void flushCache() {
-        try {
-            run(client -> client.flushCache(), "flushCache");
-        } catch (TException e) {
-            logger.info(e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public Iterable<Map.Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds) throws TException {
-        return call(this.readWriteClient, client -> client.getFileMetadata(fileIds), "getFileMetadata", fileIds);
-    }
-
-    @Override
-    public Iterable<Map.Entry<Long, MetadataPpdResult>> getFileMetadataBySarg(
-            List<Long> fileIds,
-            ByteBuffer sarg,
-            boolean doGetFooters
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.getFileMetadataBySarg(fileIds, sarg, doGetFooters), "getFileMetadataBySarg", fileIds, sarg, doGetFooters);
-    }
-
-    @Override
-    public void clearFileMetadata(List<Long> fileIds) throws TException {
-        run(client -> client.clearFileMetadata(fileIds), "clearFileMetadata", fileIds);
-    }
-
-    @Override
-    public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws TException {
-        run(client -> client.putFileMetadata(fileIds, metadata), "putFileMetadata", fileIds, metadata);
-    }
-
-    @Override
-    public boolean isSameConfObj(Configuration conf) {
-        try {
-            return call(this.readWriteClient, client -> client.isSameConfObj(conf), "isSameConfObj", conf);
-        } catch (TException e) {
-            logger.error(e.getMessage(), e);
-        }
-        return false;
-    }
-
-    @Override
-    public boolean cacheFileMetadata(
-            String dbName,
-            String tblName,
-            String partName,
-            boolean allParts
-    ) throws TException {
-        return call(client -> client.cacheFileMetadata(dbName, tblName, partName, allParts), "cacheFileMetadata", dbName, tblName, partName, allParts);
-    }
-
-    @Override
-    public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest primaryKeysRequest)
-            throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getPrimaryKeys(primaryKeysRequest), "getPrimaryKeys", primaryKeysRequest);
-    }
-
-    @Override
-    public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest foreignKeysRequest)
-            throws MetaException, NoSuchObjectException, TException {
-        // Delegate to the read path so that DESCRIBE (FORMATTED | EXTENDED)
-        // can display foreign-key information.
-        return call(this.readWriteClient, client -> client.getForeignKeys(foreignKeysRequest), "getForeignKeys", foreignKeysRequest);
-    }
-
-    @Override
-    public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest uniqueConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getUniqueConstraints(uniqueConstraintsRequest), "getUniqueConstraints", uniqueConstraintsRequest);
-    }
-
-    @Override
-    public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest notNullConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getNotNullConstraints(notNullConstraintsRequest), "getNotNullConstraints", notNullConstraintsRequest);
-    }
-
-    @Override
-    public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest defaultConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getDefaultConstraints(defaultConstraintsRequest), "getDefaultConstraints", defaultConstraintsRequest);
-    }
-
-    @Override
-    public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest checkConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-        return call(this.readWriteClient, client -> client.getCheckConstraints(checkConstraintsRequest), "getCheckConstraints", checkConstraintsRequest);
-    }
-
-    @Override
-    public void createTableWithConstraints(
-            Table tbl,
-            List<SQLPrimaryKey> primaryKeys,
-            List<SQLForeignKey> foreignKeys,
-            List<SQLUniqueConstraint> uniqueConstraints,
-            List<SQLNotNullConstraint> notNullConstraints,
-            List<SQLDefaultConstraint> defaultConstraints,
-            List<SQLCheckConstraint> checkConstraints
-    ) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException {
-        run(client -> client.createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints), "createTableWithConstraints", tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
-    }
-
-    @Override
-    public void dropConstraint(
-            String dbName,
-            String tblName,
-            String constraintName
-    ) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.dropConstraint(dbName, tblName, constraintName), "dropConstraint", dbName, tblName, constraintName);
-    }
-
-    @Override
-    public void dropConstraint(String catalogId, String dbName, String tableName, String constraintName) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.dropConstraint(catalogId, dbName, tableName, constraintName), "dropConstraint", catalogId, dbName, tableName, constraintName);
-    }
-
-    @Override
-    public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols)
-            throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addPrimaryKey(primaryKeyCols), "addPrimaryKey", primaryKeyCols);
-    }
-
-    @Override
-    public void addForeignKey(List<SQLForeignKey> foreignKeyCols)
-            throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addForeignKey(foreignKeyCols), "addForeignKey", foreignKeyCols);
-    }
-
-    @Override
-    public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addUniqueConstraint(uniqueConstraintCols), "addUniqueConstraint", uniqueConstraintCols);
-    }
-
-    @Override
-    public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addNotNullConstraint(notNullConstraintCols), "addNotNullConstraint", notNullConstraintCols);
-    }
-
-    @Override
-    public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addDefaultConstraint(defaultConstraints), "addDefaultConstraint", defaultConstraints);
-    }
-
-    @Override
-    public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws MetaException, NoSuchObjectException, TException {
-        run(client -> client.addCheckConstraint(checkConstraints), "addCheckConstraint", checkConstraints);
-    }
-
-    @Override
-    public String getMetastoreDbUuid() throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getMetastoreDbUuid(), "getMetastoreDbUuid");
-    }
-
-    @Override
-    public void createResourcePlan(WMResourcePlan wmResourcePlan, String copyFromName) throws InvalidObjectException, MetaException, TException {
-        run(client -> client.createResourcePlan(wmResourcePlan, copyFromName), "createResourcePlan", wmResourcePlan, copyFromName);
-    }
-
-    @Override
-    public WMFullResourcePlan getResourcePlan(String resourcePlanName) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getResourcePlan(resourcePlanName), "getResourcePlan", resourcePlanName);
-    }
-
-    @Override
-    public List<WMResourcePlan> getAllResourcePlans() throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getAllResourcePlans(), "getAllResourcePlans");
-    }
-
-    @Override
-    public void dropResourcePlan(String resourcePlanName) throws NoSuchObjectException, MetaException, TException {
-        run(client -> client.dropResourcePlan(resourcePlanName), "dropResourcePlan", resourcePlanName);
-    }
-
-    @Override
-    public WMFullResourcePlan alterResourcePlan(
-            String resourcePlanName,
-            WMNullableResourcePlan wmNullableResourcePlan,
-            boolean canActivateDisabled,
-            boolean isForceDeactivate,
-            boolean isReplace
-    ) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        return call(client -> client.alterResourcePlan(resourcePlanName, wmNullableResourcePlan, canActivateDisabled, isForceDeactivate, isReplace), "alterResourcePlan", resourcePlanName, wmNullableResourcePlan, canActivateDisabled, isForceDeactivate, isReplace);
-    }
-
-    @Override
-    public WMFullResourcePlan getActiveResourcePlan() throws MetaException, TException {
-        return call(this.readWriteClient, client -> client.getActiveResourcePlan(), "getActiveResourcePlan");
-    }
-
-    @Override
-    public WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.validateResourcePlan(resourcePlanName), "validateResourcePlan", resourcePlanName);
-    }
-
-    @Override
-    public void createWMTrigger(WMTrigger wmTrigger) throws InvalidObjectException, MetaException, TException {
-        run(client -> client.createWMTrigger(wmTrigger), "createWMTrigger", wmTrigger);
-    }
-
-    @Override
-    public void alterWMTrigger(WMTrigger wmTrigger) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        run(client -> client.alterWMTrigger(wmTrigger), "alterWMTrigger", wmTrigger);
-    }
-
-    @Override
-    public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException, TException {
-        run(client -> client.dropWMTrigger(resourcePlanName, triggerName), "dropWMTrigger", resourcePlanName, triggerName);
-    }
-
-    @Override
-    public List<WMTrigger> getTriggersForResourcePlan(String resourcePlan) throws NoSuchObjectException, MetaException, TException {
-        return call(this.readWriteClient, client -> client.getTriggersForResourcePlan(resourcePlan), "getTriggersForResourcePlan", resourcePlan);
-    }
-
-    @Override
-    public void createWMPool(WMPool wmPool) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        run(client -> client.createWMPool(wmPool), "createWMPool", wmPool);
-    }
-
-    @Override
-    public void alterWMPool(WMNullablePool wmNullablePool, String poolPath) throws NoSuchObjectException, InvalidObjectException, TException {
-        run(client -> client.alterWMPool(wmNullablePool, poolPath), "alterWMPool", wmNullablePool, poolPath);
-    }
-
-    @Override
-    public void dropWMPool(String resourcePlanName, String poolPath) throws TException {
-        run(client -> client.dropWMPool(resourcePlanName, poolPath), "dropWMPool", resourcePlanName, poolPath);
-    }
-
-    @Override
-    public void createOrUpdateWMMapping(WMMapping wmMapping, boolean isUpdate) throws TException {
-        run(client -> client.createOrUpdateWMMapping(wmMapping, isUpdate), "createOrUpdateWMMapping", wmMapping, isUpdate);
-    }
-
-    @Override
-    public void dropWMMapping(WMMapping wmMapping) throws TException {
-        run(client -> client.dropWMMapping(wmMapping), "dropWMMapping", wmMapping);
-    }
-
-    @Override
-    public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException {
-        run(client -> client.createOrDropTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, shouldDrop), "createOrDropTriggerToPoolMapping", resourcePlanName, triggerName, poolPath, shouldDrop);
-    }
-
-    @Override
-    public void createISchema(ISchema iSchema) throws TException {
-        run(client -> client.createISchema(iSchema), "createISchema", iSchema);
-    }
-
-    @Override
-    public void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException {
-        run(client -> client.alterISchema(catName, dbName, schemaName, newSchema), "alterISchema", catName, dbName, schemaName, newSchema);
-    }
-
-    @Override
-    public ISchema getISchema(String catalogId, String dbName, String name) throws TException {
-        return call(this.readWriteClient, client -> client.getISchema(catalogId, dbName, name), "getISchema", catalogId, dbName, name);
-    }
-
-    @Override
-    public void dropISchema(String catalogId, String dbName, String name) throws TException {
-        run(client -> client.dropISchema(catalogId, dbName, name), "dropISchema", catalogId, dbName, name);
-    }
-
-    @Override
-    public void addSchemaVersion(SchemaVersion schemaVersion) throws TException {
-        run(client -> client.addSchemaVersion(schemaVersion), "addSchemaVersion", schemaVersion);
-    }
-
-    @Override
-    public SchemaVersion getSchemaVersion(String catalogId, String dbName, String schemaName, int version) throws TException {
-        return call(this.readWriteClient, client -> client.getSchemaVersion(catalogId, dbName, schemaName, version), "getSchemaVersion", catalogId, dbName, schemaName, version);
-    }
-
-    @Override
-    public SchemaVersion getSchemaLatestVersion(String catalogId, String dbName, String schemaName) throws TException {
-        return call(this.readWriteClient, client -> client.getSchemaLatestVersion(catalogId, dbName, schemaName), "getSchemaLatestVersion", catalogId, dbName, schemaName);
-    }
-
-    @Override
-    public List<SchemaVersion> getSchemaAllVersions(String catalogId, String dbName, String schemaName) throws TException {
-        return call(this.readWriteClient, client -> client.getSchemaAllVersions(catalogId, dbName, schemaName), "getSchemaAllVersions", catalogId, dbName, schemaName);
-    }
-
-    @Override
-    public void dropSchemaVersion(String catalogId, String dbName, String schemaName, int version) throws TException {
-        run(client -> client.dropSchemaVersion(catalogId, dbName, schemaName, version), "dropSchemaVersion", catalogId, dbName, schemaName, version);
-    }
-
-    @Override
-    public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst findSchemasByColsRqst) throws TException {
-        return call(this.readWriteClient, client -> client.getSchemaByCols(findSchemasByColsRqst), "getSchemaByCols", findSchemasByColsRqst);
-    }
-
-    @Override
-    public void mapSchemaVersionToSerde(String catalogId, String dbName, String schemaName, int version, String serdeName) throws TException {
-        run(client -> client.mapSchemaVersionToSerde(catalogId, dbName, schemaName, version, serdeName), "mapSchemaVersionToSerde", catalogId, dbName, schemaName, version, serdeName);
-    }
-
-    @Override
-    public void setSchemaVersionState(String catalogId, String dbName, String schemaName, int version, SchemaVersionState state) throws TException {
-        run(client -> client.setSchemaVersionState(catalogId, dbName, schemaName, version, state), "setSchemaVersionState", catalogId, dbName, schemaName, version, state);
-    }
-
-    @Override
-    public void addSerDe(SerDeInfo serDeInfo) throws TException {
-        run(client -> client.addSerDe(serDeInfo), "addSerDe", serDeInfo);
-    }
-
-    @Override
-    public SerDeInfo getSerDe(String serDeName) throws TException {
-        return call(this.readWriteClient, client -> client.getSerDe(serDeName), "getSerDe", serDeName);
-    }
-
-    @Override
-    public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException {
-        return call(client -> client.lockMaterializationRebuild(dbName, tableName, txnId), "lockMaterializationRebuild", dbName, tableName, txnId);
-    }
-
-    @Override
-    public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException {
-        return call(client -> client.heartbeatLockMaterializationRebuild(dbName, tableName, txnId), "heartbeatLockMaterializationRebuild", dbName, tableName, txnId);
-    }
-
-    @Override
-    public void addRuntimeStat(RuntimeStat runtimeStat) throws TException {
-        run(client -> client.addRuntimeStat(runtimeStat), "addRuntimeStat", runtimeStat);
-    }
-
-    @Override
-    public List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException {
-        return call(this.readWriteClient, client -> client.getRuntimeStats(maxWeight, maxCreateTime), "getRuntimeStats", maxWeight, maxCreateTime);
-    }
-
-    @Override
-    public ShowCompactResponse showCompactions() throws TException {
-        return call(this.readWriteClient, client -> client.showCompactions(), "showCompactions");
-    }
-
-    @Override
-    public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List<String> partNames) throws TException {
-        run(client -> client.addDynamicPartitions(txnId, writeId, dbName, tableName, partNames), "addDynamicPartitions", txnId, writeId, dbName, tableName, partNames);
-    }
-
-    @Override
-    public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List<String> partNames, DataOperationType operationType) throws TException {
-        run(client -> client.addDynamicPartitions(txnId, writeId, dbName, tableName, partNames, operationType), "addDynamicPartitions", txnId, writeId, dbName, tableName, partNames, operationType);
-    }
-
-    @Override
-    public void insertTable(Table table, boolean overwrite) throws MetaException {
-        try {
-            run(client -> client.insertTable(table, overwrite), "insertTable", table, overwrite);
-        } catch (TException e) {
-            throw DataLakeUtil.throwException(new MetaException(e.getMessage()), e);
-        }
-    }
-
-    @Override
-    public NotificationEventResponse getNextNotification(
-            long lastEventId,
-            int maxEvents,
-            NotificationFilter notificationFilter
-    ) throws TException {
-        return call(this.readWriteClient, client -> client.getNextNotification(lastEventId, maxEvents, notificationFilter), "getNextNotification", lastEventId, maxEvents, notificationFilter);
-    }
-
-    @Override
-    public CurrentNotificationEventId getCurrentNotificationEventId() throws TException {
-        return call(this.readWriteClient, client -> client.getCurrentNotificationEventId(), "getCurrentNotificationEventId");
-    }
-
-    @Override
-    public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest notificationEventsCountRequest) throws TException {
-        return call(this.readWriteClient, client -> client.getNotificationEventsCount(notificationEventsCountRequest), "getNotificationEventsCount", notificationEventsCountRequest);
-    }
-
-    @Override
-    public FireEventResponse fireListenerEvent(FireEventRequest fireEventRequest) throws TException {
-        return call(this.readWriteClient, client -> client.fireListenerEvent(fireEventRequest), "fireListenerEvent", fireEventRequest);
-    }
-
-    @Override
-    @Deprecated
-    public ShowLocksResponse showLocks() throws TException {
-        return call(this.readWriteClient, client -> client.showLocks(), "showLocks");
-    }
-
-    @Override
-    public ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException {
-        return call(this.readWriteClient, client -> client.showLocks(showLocksRequest), "showLocks", showLocksRequest);
-    }
-
-    @Override
-    public GetOpenTxnsInfoResponse showTxns() throws TException {
-        return call(this.readWriteClient, client -> client.showTxns(), "showTxns");
-    }
-
-    @Override
-    public boolean tableExists(String databaseName, String tableName)
-            throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.tableExists(databaseName, tableName), "tableExists", databaseName, tableName);
-    }
-
-    @Override
-    public boolean tableExists(String catalogId, String databaseName, String tableName) throws MetaException, TException, UnknownDBException {
-        return call(this.readWriteClient, client -> client.tableExists(catalogId, databaseName, tableName), "tableExists", catalogId, databaseName, tableName);
-    }
-
-    @Override
-    public void unlock(long lockId) throws NoSuchLockException, TxnOpenException, TException {
-        run(client -> client.unlock(lockId), "unlock", lockId);
-    }
-
-    @Override
-    public boolean updatePartitionColumnStatistics(ColumnStatistics columnStatistics)
-            throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException {
-        return call(client -> client.updatePartitionColumnStatistics(columnStatistics), "updatePartitionColumnStatistics", columnStatistics);
-    }
-
-    @Override
-    public boolean updateTableColumnStatistics(ColumnStatistics columnStatistics)
-            throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException {
-        if (getTempTable(columnStatistics.getStatsDesc().getDbName(), columnStatistics.getStatsDesc().getTableName()) != null) {
-            return call(this.readWriteClient, client -> client.updateTableColumnStatistics(columnStatistics), "updateTableColumnStatistics", columnStatistics);
-        } else {
-            return call(client -> client.updateTableColumnStatistics(columnStatistics), "updateTableColumnStatistics", columnStatistics);
-        }
-    }
-
-    @Override
-    public void validatePartitionNameCharacters(List<String> part_vals) throws TException, MetaException {
-        run(this.readWriteClient, client -> client.validatePartitionNameCharacters(part_vals), "validatePartitionNameCharacters", part_vals);
-    }
-
-    @VisibleForTesting
-    public IMetaStoreClient getDlfSessionMetaStoreClient() {
-        return dlfSessionMetaStoreClient;
-    }
-
-    @VisibleForTesting
-    public IMetaStoreClient getHiveSessionMetaStoreClient() {
-        return hiveSessionMetaStoreClient;
-    }
-
-    @VisibleForTesting
-    boolean isAllowFailure() {
-        return allowFailure;
-    }
-
-    public void run(ThrowingConsumer<IMetaStoreClient, TException> consumer, String actionName, Object... parameters) throws TException {
-        FunctionalUtils.run(this.readWriteClient, extraClient, allowFailure, consumer, this.readWriteClientType, actionName, parameters);
-    }
-
-    public void run(IMetaStoreClient client, ThrowingConsumer<IMetaStoreClient, TException> consumer,
-            String actionName, Object... parameters) throws TException {
-        FunctionalUtils.run(client, Optional.empty(), allowFailure, consumer, this.readWriteClientType, actionName, parameters);
-    }
-
-    public <R> R call(ThrowingFunction<IMetaStoreClient, R, TException> function,
-            String actionName, Object... parameters) throws TException {
-        return FunctionalUtils.call(this.readWriteClient, extraClient, allowFailure, function,
-                this.readWriteClientType, actionName, parameters);
-    }
-
-    public <R> R call(IMetaStoreClient client, ThrowingFunction<IMetaStoreClient, R, TException> function,
-            String actionName, Object... parameters) throws TException {
-        return FunctionalUtils.call(client, Optional.empty(), allowFailure, function, this.readWriteClientType,
-                actionName, parameters);
-    }
-}
\ No newline at end of file
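The run/call helpers at the end of the client above funnel every metastore
operation through a primary client, with an optional extra client and an
allow-failure flag. The deleted FunctionalUtils is not part of this commit, so
the following is only a sketch of that dispatch shape under stated assumptions;
every name in it is hypothetical, not the real implementation.

    import java.util.Optional;

    // Hypothetical sketch of a primary-plus-fallback dispatcher in the shape
    // of the run(...)/call(...) helpers above; not the real FunctionalUtils.
    final class FailoverDispatch {

        @FunctionalInterface
        interface ThrowingFunction<T, R, E extends Exception> {
            R apply(T t) throws E;
        }

        // Try the primary client; if the action fails, a fallback exists, and
        // failures are tolerated, retry the same action once on the fallback.
        static <C, R> R call(C primary, Optional<C> fallback, boolean allowFailure,
                ThrowingFunction<C, R, Exception> action) throws Exception {
            try {
                return action.apply(primary);
            } catch (Exception e) {
                if (allowFailure && fallback.isPresent()) {
                    return action.apply(fallback.get());
                }
                throw e;
            }
        }
    }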
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/BaseCatalogToHiveConverter.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/BaseCatalogToHiveConverter.java
deleted file mode 100644
index 6c788ddeb8..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/BaseCatalogToHiveConverter.java
+++ /dev/null
@@ -1,541 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.services.glue.model.BinaryColumnStatisticsData;
-import com.amazonaws.services.glue.model.BooleanColumnStatisticsData;
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ColumnStatisticsType;
-import com.amazonaws.services.glue.model.DateColumnStatisticsData;
-import com.amazonaws.services.glue.model.DecimalColumnStatisticsData;
-import com.amazonaws.services.glue.model.DoubleColumnStatisticsData;
-import com.amazonaws.services.glue.model.ErrorDetail;
-import com.amazonaws.services.glue.model.LongColumnStatisticsData;
-import com.amazonaws.services.glue.model.StringColumnStatisticsData;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import static org.apache.commons.lang3.ObjectUtils.firstNonNull;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-public class BaseCatalogToHiveConverter implements CatalogToHiveConverter {
-
-  private static final Logger logger = Logger.getLogger(BaseCatalogToHiveConverter.class);
-
-  private static final ImmutableMap<String, HiveException> EXCEPTION_MAP = ImmutableMap.<String, HiveException>builder()
-      .put("AlreadyExistsException", new HiveException() {
-        public TException get(String msg) {
-          return new AlreadyExistsException(msg);
-        }
-      })
-      .put("InvalidInputException", new HiveException() {
-        public TException get(String msg) {
-          return new InvalidObjectException(msg);
-        }
-      })
-      .put("InternalServiceException", new HiveException() {
-        public TException get(String msg) {
-          return new MetaException(msg);
-        }
-      })
-      .put("ResourceNumberLimitExceededException", new HiveException() {
-        public TException get(String msg) {
-          return new MetaException(msg);
-        }
-      })
-      .put("OperationTimeoutException", new HiveException() {
-        public TException get(String msg) {
-          return new MetaException(msg);
-        }
-      })
-      .put("EntityNotFoundException", new HiveException() {
-        public TException get(String msg) {
-          return new NoSuchObjectException(msg);
-        }
-      })
-      .build();
-
-  interface HiveException {
-    TException get(String msg);
-  }
-
-  public TException wrapInHiveException(Throwable e) {
-    return getHiveException(e.getClass().getSimpleName(), e.getMessage());
-  }
-
-  public TException errorDetailToHiveException(ErrorDetail errorDetail) {
-    return getHiveException(errorDetail.getErrorCode(), errorDetail.getErrorMessage());
-  }
-
-  private TException getHiveException(String errorName, String errorMsg) {
-    if (EXCEPTION_MAP.containsKey(errorName)) {
-      return EXCEPTION_MAP.get(errorName).get(errorMsg);
-    } else {
-      logger.warn("Hive Exception type not found for " + errorName);
-      return new MetaException(errorMsg);
-    }
-  }
-
-  public Database convertDatabase(com.amazonaws.services.glue.model.Database catalogDatabase) {
-    Database hiveDatabase = new Database();
-    hiveDatabase.setName(catalogDatabase.getName());
-    hiveDatabase.setDescription(catalogDatabase.getDescription());
-    String location = catalogDatabase.getLocationUri();
-    hiveDatabase.setLocationUri(location == null ? "" : location);
-    hiveDatabase.setParameters(firstNonNull(catalogDatabase.getParameters(), Maps.<String, String>newHashMap()));
-    return hiveDatabase;
-  }
-
-  public FieldSchema convertFieldSchema(com.amazonaws.services.glue.model.Column catalogFieldSchema) {
-    FieldSchema hiveFieldSchema = new FieldSchema();
-    hiveFieldSchema.setType(catalogFieldSchema.getType());
-    hiveFieldSchema.setName(catalogFieldSchema.getName());
-    hiveFieldSchema.setComment(catalogFieldSchema.getComment());
-
-    return hiveFieldSchema;
-  }
-
-  public List<FieldSchema> convertFieldSchemaList(List<com.amazonaws.services.glue.model.Column> catalogFieldSchemaList) {
-    List<FieldSchema> hiveFieldSchemaList = new ArrayList<>();
-    if (catalogFieldSchemaList == null) {
-      return hiveFieldSchemaList;
-    }
-    for (com.amazonaws.services.glue.model.Column catalogFieldSchema : catalogFieldSchemaList){
-      hiveFieldSchemaList.add(convertFieldSchema(catalogFieldSchema));
-    }
-
-    return hiveFieldSchemaList;
-  }
-
-  public Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
-    Table hiveTable = new Table();
-    hiveTable.setDbName(dbname);
-    hiveTable.setTableName(catalogTable.getName());
-    Date createTime = catalogTable.getCreateTime();
-    hiveTable.setCreateTime(createTime == null ? 0 : (int) (createTime.getTime() / 1000));
-    hiveTable.setOwner(catalogTable.getOwner());
-    Date lastAccessedTime = catalogTable.getLastAccessTime();
-    hiveTable.setLastAccessTime(lastAccessedTime == null ? 0 : (int) (lastAccessedTime.getTime() / 1000));
-    hiveTable.setRetention(catalogTable.getRetention());
-    hiveTable.setSd(convertStorageDescriptor(catalogTable.getStorageDescriptor()));
-    hiveTable.setPartitionKeys(convertFieldSchemaList(catalogTable.getPartitionKeys()));
-    // Hive may throw an NPE during dropTable if the parameter map is null.
-    Map<String, String> parameterMap = catalogTable.getParameters();
-    if (parameterMap == null) {
-      parameterMap = Maps.newHashMap();
-    }
-    hiveTable.setParameters(parameterMap);
-    hiveTable.setViewOriginalText(catalogTable.getViewOriginalText());
-    hiveTable.setViewExpandedText(catalogTable.getViewExpandedText());
-    hiveTable.setTableType(catalogTable.getTableType());
-
-    return hiveTable;
-  }
-
-  public TableMeta convertTableMeta(com.amazonaws.services.glue.model.Table catalogTable, String dbName) {
-    TableMeta tableMeta = new TableMeta();
-    tableMeta.setDbName(dbName);
-    tableMeta.setTableName(catalogTable.getName());
-    tableMeta.setTableType(catalogTable.getTableType());
-    if (catalogTable.getParameters().containsKey("comment")) {
-      tableMeta.setComments(catalogTable.getParameters().get("comment"));
-    }
-    return tableMeta;
-  }
-
-  public StorageDescriptor convertStorageDescriptor(com.amazonaws.services.glue.model.StorageDescriptor catalogSd) {
-    StorageDescriptor hiveSd = new StorageDescriptor();
-    hiveSd.setCols(convertFieldSchemaList(catalogSd.getColumns()));
-    hiveSd.setLocation(catalogSd.getLocation());
-    hiveSd.setInputFormat(catalogSd.getInputFormat());
-    hiveSd.setOutputFormat(catalogSd.getOutputFormat());
-    hiveSd.setCompressed(catalogSd.getCompressed());
-    hiveSd.setNumBuckets(catalogSd.getNumberOfBuckets());
-    hiveSd.setSerdeInfo(convertSerDeInfo(catalogSd.getSerdeInfo()));
-    hiveSd.setBucketCols(firstNonNull(catalogSd.getBucketColumns(), Lists.<String>newArrayList()));
-    hiveSd.setSortCols(convertOrderList(catalogSd.getSortColumns()));
-    hiveSd.setParameters(firstNonNull(catalogSd.getParameters(), Maps.<String, String>newHashMap()));
-    hiveSd.setSkewedInfo(convertSkewedInfo(catalogSd.getSkewedInfo()));
-    hiveSd.setStoredAsSubDirectories(catalogSd.getStoredAsSubDirectories());
-
-    return hiveSd;
-  }
-
-  public Order convertOrder(com.amazonaws.services.glue.model.Order catalogOrder) {
-    Order hiveOrder = new Order();
-    hiveOrder.setCol(catalogOrder.getColumn());
-    hiveOrder.setOrder(catalogOrder.getSortOrder());
-
-    return hiveOrder;
-  }
-
-  public List<Order> convertOrderList(List<com.amazonaws.services.glue.model.Order> catalogOrderList) {
-    List<Order> hiveOrderList = new ArrayList<>();
-    if (catalogOrderList == null) {
-      return hiveOrderList;
-    }
-    for (com.amazonaws.services.glue.model.Order catalogOrder : catalogOrderList){
-      hiveOrderList.add(convertOrder(catalogOrder));
-    }
-
-    return hiveOrderList;
-  }
-
-  public SerDeInfo convertSerDeInfo(com.amazonaws.services.glue.model.SerDeInfo catalogSerDeInfo){
-    SerDeInfo hiveSerDeInfo = new SerDeInfo();
-    hiveSerDeInfo.setName(catalogSerDeInfo.getName());
-    hiveSerDeInfo.setParameters(firstNonNull(catalogSerDeInfo.getParameters(), Maps.<String, String>newHashMap()));
-    hiveSerDeInfo.setSerializationLib(catalogSerDeInfo.getSerializationLibrary());
-
-    return hiveSerDeInfo;
-  }
-
-  public SkewedInfo convertSkewedInfo(com.amazonaws.services.glue.model.SkewedInfo catalogSkewedInfo) {
-    if (catalogSkewedInfo == null) {
-      return null;
-    }
-
-    SkewedInfo hiveSkewedInfo = new SkewedInfo();
-    hiveSkewedInfo.setSkewedColNames(firstNonNull(catalogSkewedInfo.getSkewedColumnNames(), Lists.<String>newArrayList()));
-    hiveSkewedInfo.setSkewedColValues(convertSkewedValue(catalogSkewedInfo.getSkewedColumnValues()));
-    hiveSkewedInfo.setSkewedColValueLocationMaps(convertSkewedMap(catalogSkewedInfo.getSkewedColumnValueLocationMaps()));
-    return hiveSkewedInfo;
-  }
-
-  public Partition convertPartition(com.amazonaws.services.glue.model.Partition src) {
-    Partition tgt = new Partition();
-    Date createTime = src.getCreationTime();
-    if (createTime != null) {
-      tgt.setCreateTime((int) (createTime.getTime() / 1000));
-      tgt.setCreateTimeIsSet(true);
-    } else {
-      tgt.setCreateTimeIsSet(false);
-    }
-    String dbName = src.getDatabaseName();
-    if (dbName != null) {
-      tgt.setDbName(dbName);
-      tgt.setDbNameIsSet(true);
-    } else {
-      tgt.setDbNameIsSet(false);
-    }
-    Date lastAccessTime = src.getLastAccessTime();
-    if (lastAccessTime != null) {
-      tgt.setLastAccessTime((int) (lastAccessTime.getTime() / 1000));
-      tgt.setLastAccessTimeIsSet(true);
-    } else {
-      tgt.setLastAccessTimeIsSet(false);
-    }
-    Map<String, String> params = src.getParameters();
-
-    // A null parameter map causes Hive to throw an NPE,
-    // so ensure we do not return a Partition object with a null parameter map.
-    if (params == null) {
-      params = Maps.newHashMap();
-    }
-
-    tgt.setParameters(params);
-    tgt.setParametersIsSet(true);
-
-    String tableName = src.getTableName();
-    if (tableName != null) {
-      tgt.setTableName(tableName);
-      tgt.setTableNameIsSet(true);
-    } else {
-      tgt.setTableNameIsSet(false);
-    }
-
-    List<String> values = src.getValues();
-    if (values != null) {
-      tgt.setValues(values);
-      tgt.setValuesIsSet(true);
-    } else {
-      tgt.setValuesIsSet(false);
-    }
-
-    com.amazonaws.services.glue.model.StorageDescriptor sd = src.getStorageDescriptor();
-    if (sd != null) {
-      StorageDescriptor hiveSd = convertStorageDescriptor(sd);
-      tgt.setSd(hiveSd);
-      tgt.setSdIsSet(true);
-    } else {
-      tgt.setSdIsSet(false);
-    }
-
-    return tgt;
-  }
-
-  public List<Partition> convertPartitions(List<com.amazonaws.services.glue.model.Partition> src) {
-    if (src == null) {
-      return null;
-    }
-
-    List<Partition> target = Lists.newArrayList();
-    for (com.amazonaws.services.glue.model.Partition partition : src) {
-      target.add(convertPartition(partition));
-    }
-    return target;
-  }
-
-  public List<String> convertStringToList(final String s) {
-    if (s == null) {
-      return null;
-    }
-    List<String> listString = new ArrayList<>();
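-    // Each element is encoded as "<length>$<value>" (the inverse of
-    // convertListToString in HiveToCatalogConverter): read the numeric length
-    // prefix up to '$', then take exactly that many characters as the element.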
-    for (int i = 0; i < s.length();) {
-      StringBuilder length = new StringBuilder();
-      for (int j = i; j < s.length(); j++){
-        if (s.charAt(j) != '$') {
-          length.append(s.charAt(j));
-        } else {
-          int lengthOfString = Integer.valueOf(length.toString());
-          listString.add(s.substring(j + 1, j + 1 + lengthOfString));
-          i = j + 1 + lengthOfString;
-          break;
-        }
-      }
-    }
-    return listString;
-  }
-
-  @Nonnull
-  public Map<List<String>, String> convertSkewedMap(final @Nullable Map<String, String> catalogSkewedMap) {
-    Map<List<String>, String> skewedMap = new HashMap<>();
-    if (catalogSkewedMap == null){
-      return skewedMap;
-    }
-
-    for (String coralKey : catalogSkewedMap.keySet()) {
-      skewedMap.put(convertStringToList(coralKey), catalogSkewedMap.get(coralKey));
-    }
-    return skewedMap;
-  }
-
-  @Nonnull
-  public List<List<String>> convertSkewedValue(final @Nullable List<String> catalogSkewedValue) {
-    List<List<String>> skewedValues = new ArrayList<>();
-    if (catalogSkewedValue == null){
-      return skewedValues;
-    }
-
-    for (String skewValue : catalogSkewedValue) {
-      skewedValues.add(convertStringToList(skewValue));
-    }
-    return skewedValues;
-  }
-
-  public PrincipalType convertPrincipalType(com.amazonaws.services.glue.model.PrincipalType catalogPrincipalType) {
-    if (catalogPrincipalType == null) {
-      return null;
-    }
-
-    if (catalogPrincipalType == com.amazonaws.services.glue.model.PrincipalType.GROUP) {
-      return PrincipalType.GROUP;
-    } else if (catalogPrincipalType == com.amazonaws.services.glue.model.PrincipalType.USER) {
-      return PrincipalType.USER;
-    } else if (catalogPrincipalType == com.amazonaws.services.glue.model.PrincipalType.ROLE) {
-      return PrincipalType.ROLE;
-    }
-    throw new RuntimeException("Unknown principal type: " + catalogPrincipalType.name());
-  }
-
-  public Function convertFunction(final String dbName,
-                                  final com.amazonaws.services.glue.model.UserDefinedFunction catalogFunction) {
-    if (catalogFunction ==  null) {
-      return null;
-    }
-    Function hiveFunction = new Function();
-    hiveFunction.setClassName(catalogFunction.getClassName());
-    if (catalogFunction.getCreateTime() != null) {
-      // AWS Glue can return a function with a null create time.
-      hiveFunction.setCreateTime((int) (catalogFunction.getCreateTime().getTime() / 1000));
-    }
-    hiveFunction.setDbName(dbName);
-    hiveFunction.setFunctionName(catalogFunction.getFunctionName());
-    hiveFunction.setFunctionType(FunctionType.JAVA);
-    hiveFunction.setOwnerName(catalogFunction.getOwnerName());
-    hiveFunction.setOwnerType(convertPrincipalType(com.amazonaws.services.glue.model.PrincipalType.fromValue(catalogFunction.getOwnerType())));
-    hiveFunction.setResourceUris(convertResourceUriList(catalogFunction.getResourceUris()));
-    return hiveFunction;
-  }
-
-  public List<ResourceUri> convertResourceUriList(
-          final List<com.amazonaws.services.glue.model.ResourceUri> catalogResourceUriList) {
-    if (catalogResourceUriList == null) {
-      return null;
-    }
-    List<ResourceUri> hiveResourceUriList = new ArrayList<>();
-    for (com.amazonaws.services.glue.model.ResourceUri catalogResourceUri : catalogResourceUriList) {
-      ResourceUri hiveResourceUri = new ResourceUri();
-      hiveResourceUri.setUri(catalogResourceUri.getUri());
-      if (catalogResourceUri.getResourceType() != null) {
-        hiveResourceUri.setResourceType(ResourceType.valueOf(catalogResourceUri.getResourceType()));
-      }
-      hiveResourceUriList.add(hiveResourceUri);
-    }
-
-    return hiveResourceUriList;
-  }
-
-  public List<ColumnStatisticsObj> convertColumnStatisticsList(List<ColumnStatistics> catalogColumnStatisticsList) {
-    List<ColumnStatisticsObj> hiveColumnStatisticsList = new ArrayList<>();
-    for (ColumnStatistics catalogColumnStatistics : catalogColumnStatisticsList) {
-      ColumnStatisticsObj hiveColumnStatistics = new ColumnStatisticsObj();
-      hiveColumnStatistics.setColName(catalogColumnStatistics.getColumnName());
-      hiveColumnStatistics.setColType(catalogColumnStatistics.getColumnType());
-      hiveColumnStatistics.setStatsData(convertColumnStatisticsData(catalogColumnStatistics.getStatisticsData()));
-      hiveColumnStatisticsList.add(hiveColumnStatistics);
-    }
-
-    return hiveColumnStatisticsList;
-  }
-
-  private ColumnStatisticsData convertColumnStatisticsData(
-      com.amazonaws.services.glue.model.ColumnStatisticsData catalogColumnStatisticsData) {
-    ColumnStatisticsData hiveColumnStatisticsData = new ColumnStatisticsData();
-
-    ColumnStatisticsType type = ColumnStatisticsType.fromValue(catalogColumnStatisticsData.getType());
-    switch (type) {
-      case BINARY:
-        BinaryColumnStatisticsData catalogBinaryData = catalogColumnStatisticsData.getBinaryColumnStatisticsData();
-        BinaryColumnStatsData hiveBinaryData = new BinaryColumnStatsData();
-        hiveBinaryData.setAvgColLen(catalogBinaryData.getAverageLength());
-        hiveBinaryData.setMaxColLen(catalogBinaryData.getMaximumLength());
-        hiveBinaryData.setNumNulls(catalogBinaryData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setFieldValue(ColumnStatisticsData._Fields.BINARY_STATS, hiveBinaryData);
-        hiveColumnStatisticsData.setBinaryStats(hiveBinaryData);
-        break;
-
-      case BOOLEAN:
-        BooleanColumnStatisticsData catalogBooleanData = catalogColumnStatisticsData.getBooleanColumnStatisticsData();
-        BooleanColumnStatsData hiveBooleanData = new BooleanColumnStatsData();
-        hiveBooleanData.setNumFalses(catalogBooleanData.getNumberOfFalses());
-        hiveBooleanData.setNumTrues(catalogBooleanData.getNumberOfTrues());
-        hiveBooleanData.setNumNulls(catalogBooleanData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setBooleanStats(hiveBooleanData);
-        break;
-
-      case DATE:
-        DateColumnStatisticsData catalogDateData = catalogColumnStatisticsData.getDateColumnStatisticsData();
-        DateColumnStatsData hiveDateData = new DateColumnStatsData();
-        hiveDateData.setLowValue(ConverterUtils.dateToHiveDate(catalogDateData.getMinimumValue()));
-        hiveDateData.setHighValue(ConverterUtils.dateToHiveDate(catalogDateData.getMaximumValue()));
-        hiveDateData.setNumDVs(catalogDateData.getNumberOfDistinctValues());
-        hiveDateData.setNumNulls(catalogDateData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setDateStats(hiveDateData);
-        break;
-
-      case DECIMAL:
-        DecimalColumnStatisticsData catalogDecimalData = catalogColumnStatisticsData.getDecimalColumnStatisticsData();
-        DecimalColumnStatsData hiveDecimalData = new DecimalColumnStatsData();
-        hiveDecimalData.setLowValue(convertDecimal(catalogDecimalData.getMinimumValue()));
-        hiveDecimalData.setHighValue(convertDecimal(catalogDecimalData.getMaximumValue()));
-        hiveDecimalData.setNumDVs(catalogDecimalData.getNumberOfDistinctValues());
-        hiveDecimalData.setNumNulls(catalogDecimalData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setDecimalStats(hiveDecimalData);
-        break;
-
-      case DOUBLE:
-        DoubleColumnStatisticsData catalogDoubleData = catalogColumnStatisticsData.getDoubleColumnStatisticsData();
-        DoubleColumnStatsData hiveDoubleData = new DoubleColumnStatsData();
-        hiveDoubleData.setLowValue(catalogDoubleData.getMinimumValue());
-        hiveDoubleData.setHighValue(catalogDoubleData.getMaximumValue());
-        hiveDoubleData.setNumDVs(catalogDoubleData.getNumberOfDistinctValues());
-        hiveDoubleData.setNumNulls(catalogDoubleData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setDoubleStats(hiveDoubleData);
-        break;
-
-      case LONG:
-        LongColumnStatisticsData catalogLongData = catalogColumnStatisticsData.getLongColumnStatisticsData();
-        LongColumnStatsData hiveLongData = new LongColumnStatsData();
-        hiveLongData.setLowValue(catalogLongData.getMinimumValue());
-        hiveLongData.setHighValue(catalogLongData.getMaximumValue());
-        hiveLongData.setNumDVs(catalogLongData.getNumberOfDistinctValues());
-        hiveLongData.setNumNulls(catalogLongData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setLongStats(hiveLongData);
-        break;
-
-      case STRING:
-        StringColumnStatisticsData catalogStringData = catalogColumnStatisticsData.getStringColumnStatisticsData();
-        StringColumnStatsData hiveStringData = new StringColumnStatsData();
-        hiveStringData.setAvgColLen(catalogStringData.getAverageLength());
-        hiveStringData.setMaxColLen(catalogStringData.getMaximumLength());
-        hiveStringData.setNumDVs(catalogStringData.getNumberOfDistinctValues());
-        hiveStringData.setNumNulls(catalogStringData.getNumberOfNulls());
-
-        hiveColumnStatisticsData.setStringStats(hiveStringData);
-        break;
-    }
-
-    return hiveColumnStatisticsData;
-  }
-
-  private Decimal convertDecimal(com.amazonaws.services.glue.model.DecimalNumber catalogDecimal) {
-    Decimal hiveDecimal = new Decimal();
-    hiveDecimal.setUnscaled(catalogDecimal.getUnscaledValue());
-    hiveDecimal.setScale(catalogDecimal.getScale().shortValue());
-    return hiveDecimal;
-  }
-
-}
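convertStringToList above is the decoding half of a small length-prefixed codec
used for skewed-column values; the encoding half, convertListToString, appears
in HiveToCatalogConverter below. A minimal standalone round trip of that
encoding, re-implemented for illustration (the class and method names here are
mine, not part of the deleted sources):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class SkewedValueCodecDemo {
        // Encode: each element becomes "<length>$<value>", e.g. ["ab", "c"] -> "2$ab1$c".
        static String encode(List<String> values) {
            StringBuilder sb = new StringBuilder();
            for (String v : values) {
                sb.append(v.length()).append('$').append(v);
            }
            return sb.toString();
        }

        // Decode: read digits up to '$', then consume exactly that many characters,
        // mirroring the contract of convertStringToList above.
        static List<String> decode(String s) {
            List<String> out = new ArrayList<>();
            int i = 0;
            while (i < s.length()) {
                int sep = s.indexOf('$', i);
                int len = Integer.parseInt(s.substring(i, sep));
                out.add(s.substring(sep + 1, sep + 1 + len));
                i = sep + 1 + len;
            }
            return out;
        }

        public static void main(String[] args) {
            List<String> values = Arrays.asList("a$b", "", "2023");
            String packed = encode(values);                     // "3$a$b0$4$2023"
            System.out.println(packed);
            System.out.println(decode(packed).equals(values));  // true
        }
    }

Because the length is read before the value, values may themselves contain '$'
without breaking decoding, which is why this codec is used instead of a plain
delimiter join.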
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverter.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverter.java
deleted file mode 100644
index 7d6a0f4b3a..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverter.java
+++ /dev/null
@@ -1,58 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ErrorDetail;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.thrift.TException;
-
-import java.util.List;
-
-public interface CatalogToHiveConverter {
-
-  TException wrapInHiveException(Throwable e);
-
-  TException errorDetailToHiveException(ErrorDetail errorDetail);
-
-  Database convertDatabase(com.amazonaws.services.glue.model.Database catalogDatabase);
-
-  List<FieldSchema> convertFieldSchemaList(List<com.amazonaws.services.glue.model.Column> catalogFieldSchemaList);
-
-  Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname);
-
-  TableMeta convertTableMeta(com.amazonaws.services.glue.model.Table catalogTable, String dbName);
-
-  Partition convertPartition(com.amazonaws.services.glue.model.Partition src);
-
-  List<Partition> convertPartitions(List<com.amazonaws.services.glue.model.Partition> src);
-
-  Function convertFunction(String dbName, com.amazonaws.services.glue.model.UserDefinedFunction catalogFunction);
-
-  List<ColumnStatisticsObj> convertColumnStatisticsList(List<ColumnStatistics> catalogColumnStatisticsList);
-}
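This interface is also the seam where Glue service errors become Hive thrift
exceptions: wrapInHiveException keys off the exception's simple class name via
the EXCEPTION_MAP in BaseCatalogToHiveConverter (for example,
EntityNotFoundException maps to NoSuchObjectException). A sketch of typical
caller-side use; the wrapper class and its wiring are assumptions, only the
converter calls come from the deleted sources:

    import com.amazonaws.AmazonServiceException;
    import com.amazonaws.glue.catalog.converters.CatalogToHiveConverter;
    import com.amazonaws.services.glue.AWSGlue;
    import com.amazonaws.services.glue.model.GetTableRequest;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    // Illustrative caller: fetch a Glue table, translating service errors
    // into the Hive exceptions that IMetaStoreClient callers expect.
    class GlueTableFetcher {
        private final AWSGlue glueClient;
        private final CatalogToHiveConverter converter;

        GlueTableFetcher(AWSGlue glueClient, CatalogToHiveConverter converter) {
            this.glueClient = glueClient;
            this.converter = converter;
        }

        Table getTable(String dbName, String tableName) throws TException {
            try {
                GetTableRequest request = new GetTableRequest()
                        .withDatabaseName(dbName).withName(tableName);
                return converter.convertTable(glueClient.getTable(request).getTable(), dbName);
            } catch (AmazonServiceException e) {
                // The map is keyed by simple class name, so an unrecognized
                // error degrades to a generic MetaException.
                throw converter.wrapInHiveException(e);
            }
        }
    }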
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverterFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverterFactory.java
deleted file mode 100644
index d8430ec169..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/CatalogToHiveConverterFactory.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hive.common.util.HiveVersionInfo;
-
-public class CatalogToHiveConverterFactory {
-
-  private static final String HIVE_3_VERSION = "3.";
-
-  private static CatalogToHiveConverter catalogToHiveConverter;
-
-  public static CatalogToHiveConverter getCatalogToHiveConverter() {
-    if (catalogToHiveConverter == null) {
-      catalogToHiveConverter = loadConverter();
-    }
-    return catalogToHiveConverter;
-  }
-
-  private static CatalogToHiveConverter loadConverter() {
-    String hiveVersion = HiveVersionInfo.getShortVersion();
-
-    if (hiveVersion.startsWith(HIVE_3_VERSION)) {
-      return new Hive3CatalogToHiveConverter();
-    } else {
-      return new BaseCatalogToHiveConverter();
-    }
-  }
-
-  @VisibleForTesting
-  static void clearConverter() {
-    catalogToHiveConverter = null;
-  }
-}
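The factory resolves the converter once from HiveVersionInfo.getShortVersion()
and caches it for the life of the process (clearConverter exists only for
tests). The lazy initialization is unsynchronized, but the race is benign:
loadConverter is deterministic and the converters are stateless, so concurrent
callers can at worst each build an instance, after which one assignment wins.
Typical usage is a single static call:

    // Pick the version-appropriate converter once, then reuse it everywhere.
    CatalogToHiveConverter converter = CatalogToHiveConverterFactory.getCatalogToHiveConverter();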
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/ConverterUtils.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/ConverterUtils.java
deleted file mode 100644
index b350631931..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/ConverterUtils.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.services.glue.model.Table;
-import com.google.gson.Gson;
-
-import java.util.Date;
-import java.util.concurrent.TimeUnit;
-
-public class ConverterUtils {
-
-  private static final Gson gson = new Gson();
-
-  public static String catalogTableToString(final Table table) {
-    return gson.toJson(table);
-  }
-
-  public static Table stringToCatalogTable(final String input) {
-    return gson.fromJson(input, Table.class);
-  }
-
-  public static org.apache.hadoop.hive.metastore.api.Date dateToHiveDate(Date date) {
-    return new org.apache.hadoop.hive.metastore.api.Date(TimeUnit.MILLISECONDS.toDays(date.getTime()));
-  }
-
-  public static Date hiveDatetoDate(org.apache.hadoop.hive.metastore.api.Date hiveDate) {
-    return new Date(TimeUnit.DAYS.toMillis(hiveDate.getDaysSinceEpoch()));
-  }
-}
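dateToHiveDate and hiveDatetoDate above bridge Hive's day-granularity date
(days since epoch) and Glue's java.util.Date (milliseconds since epoch). A
quick worked check of that arithmetic; note that converting toward Hive
truncates to whole days, so intra-day time is lost:

    import java.util.Date;
    import java.util.concurrent.TimeUnit;

    public class DateConversionDemo {
        public static void main(String[] args) {
            long millis = 1680523200000L;                       // 2023-04-03T12:00:00Z
            long days = TimeUnit.MILLISECONDS.toDays(millis);   // 19450 whole days
            Date back = new Date(TimeUnit.DAYS.toMillis(days)); // midnight UTC that day
            System.out.println(days);
            System.out.println(back.getTime() < millis);        // true: the 12h are gone
        }
    }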
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/GlueInputConverter.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/GlueInputConverter.java
deleted file mode 100644
index 45889e0ae6..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/GlueInputConverter.java
+++ /dev/null
@@ -1,116 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.services.glue.model.DatabaseInput;
-import com.amazonaws.services.glue.model.PartitionInput;
-import com.amazonaws.services.glue.model.TableInput;
-import com.amazonaws.services.glue.model.UserDefinedFunctionInput;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * This class provides methods to convert Hive/Catalog objects to the Input objects
- * used as Glue API parameters.
- */
-public final class GlueInputConverter {
-
-  public static DatabaseInput convertToDatabaseInput(Database hiveDatabase) {
-    return convertToDatabaseInput(HiveToCatalogConverter.convertDatabase(hiveDatabase));
-  }
-
-  public static DatabaseInput convertToDatabaseInput(com.amazonaws.services.glue.model.Database database) {
-    DatabaseInput input = new DatabaseInput();
-
-    input.setName(database.getName());
-    input.setDescription(database.getDescription());
-    input.setLocationUri(database.getLocationUri());
-    input.setParameters(database.getParameters());
-
-    return input;
-  }
-
-  public static TableInput convertToTableInput(Table hiveTable) {
-    return convertToTableInput(HiveToCatalogConverter.convertTable(hiveTable));
-  }
-
-  public static TableInput convertToTableInput(com.amazonaws.services.glue.model.Table table) {
-    TableInput tableInput = new TableInput();
-
-    tableInput.setRetention(table.getRetention());
-    tableInput.setPartitionKeys(table.getPartitionKeys());
-    tableInput.setTableType(table.getTableType());
-    tableInput.setName(table.getName());
-    tableInput.setOwner(table.getOwner());
-    tableInput.setLastAccessTime(table.getLastAccessTime());
-    tableInput.setStorageDescriptor(table.getStorageDescriptor());
-    tableInput.setParameters(table.getParameters());
-    tableInput.setViewExpandedText(table.getViewExpandedText());
-    tableInput.setViewOriginalText(table.getViewOriginalText());
-
-    return tableInput;
-  }
-
-  public static PartitionInput convertToPartitionInput(Partition src) {
-    return convertToPartitionInput(HiveToCatalogConverter.convertPartition(src));
-  }
-
-  public static PartitionInput convertToPartitionInput(com.amazonaws.services.glue.model.Partition src) {
-    PartitionInput partitionInput = new PartitionInput();
-
-    partitionInput.setLastAccessTime(src.getLastAccessTime());
-    partitionInput.setParameters(src.getParameters());
-    partitionInput.setStorageDescriptor(src.getStorageDescriptor());
-    partitionInput.setValues(src.getValues());
-
-    return partitionInput;
-  }
-
-  public static List<PartitionInput> convertToPartitionInputs(Collection<com.amazonaws.services.glue.model.Partition> parts) {
-    List<PartitionInput> inputList = new ArrayList<>();
-
-    for (com.amazonaws.services.glue.model.Partition part : parts) {
-      inputList.add(convertToPartitionInput(part));
-    }
-    return inputList;
-  }
-
-  public static UserDefinedFunctionInput convertToUserDefinedFunctionInput(Function hiveFunction) {
-    UserDefinedFunctionInput functionInput = new UserDefinedFunctionInput();
-
-    functionInput.setClassName(hiveFunction.getClassName());
-    functionInput.setFunctionName(hiveFunction.getFunctionName());
-    functionInput.setOwnerName(hiveFunction.getOwnerName());
-    if (hiveFunction.getOwnerType() != null) {
-      functionInput.setOwnerType(hiveFunction.getOwnerType().name());
-    }
-    functionInput.setResourceUris(HiveToCatalogConverter.covertResourceUriList(hiveFunction.getResourceUris()));
-    return functionInput;
-  }
-
-}
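These *Input shapes are what Glue's mutating APIs accept: create and update
calls take an input object rather than the full catalog entity, which carries
server-managed fields such as create time. A sketch of pushing an altered Hive
table back through this converter, assuming an already-configured AWSGlue
client (the wrapper class here is illustrative):

    import com.amazonaws.glue.catalog.converters.GlueInputConverter;
    import com.amazonaws.services.glue.AWSGlue;
    import com.amazonaws.services.glue.model.TableInput;
    import com.amazonaws.services.glue.model.UpdateTableRequest;
    import org.apache.hadoop.hive.metastore.api.Table;

    final class GlueTableUpdater {
        // Convert the Hive-side table to Glue's input shape and update in place.
        static void alterTable(AWSGlue glue, String dbName, Table hiveTable) {
            TableInput input = GlueInputConverter.convertToTableInput(hiveTable);
            glue.updateTable(new UpdateTableRequest()
                    .withDatabaseName(dbName)
                    .withTableInput(input));
        }
    }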
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/Hive3CatalogToHiveConverter.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/Hive3CatalogToHiveConverter.java
deleted file mode 100644
index 4252ecd38a..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/Hive3CatalogToHiveConverter.java
+++ /dev/null
@@ -1,70 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-
-public class Hive3CatalogToHiveConverter extends BaseCatalogToHiveConverter {
-
-  @Override
-  public Database convertDatabase(com.amazonaws.services.glue.model.Database catalogDatabase) {
-    Database hiveDatabase = super.convertDatabase(catalogDatabase);
-    hiveDatabase.setCatalogName(DEFAULT_CATALOG_NAME);
-    return hiveDatabase;
-  }
-
-  @Override
-  public Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
-    Table hiveTable = super.convertTable(catalogTable, dbname);
-    hiveTable.setCatName(DEFAULT_CATALOG_NAME);
-    return hiveTable;
-  }
-
-  @Override
-  public TableMeta convertTableMeta(com.amazonaws.services.glue.model.Table catalogTable, String dbName) {
-    TableMeta tableMeta = super.convertTableMeta(catalogTable, dbName);
-    tableMeta.setCatName(DEFAULT_CATALOG_NAME);
-    return tableMeta;
-  }
-
-  @Override
-  public Partition convertPartition(com.amazonaws.services.glue.model.Partition src) {
-    Partition hivePartition = super.convertPartition(src);
-    hivePartition.setCatName(DEFAULT_CATALOG_NAME);
-    return hivePartition;
-  }
-
-  @Override
-  public Function convertFunction(String dbName, com.amazonaws.services.glue.model.UserDefinedFunction catalogFunction) {
-    Function hiveFunction = super.convertFunction(dbName, catalogFunction);
-    if (hiveFunction == null) {
-      return null;
-    }
-    hiveFunction.setCatName(DEFAULT_CATALOG_NAME);
-    return hiveFunction;
-  }
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/HiveToCatalogConverter.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/HiveToCatalogConverter.java
deleted file mode 100644
index 48f4ca73df..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/HiveToCatalogConverter.java
+++ /dev/null
@@ -1,372 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.services.glue.model.BinaryColumnStatisticsData;
-import com.amazonaws.services.glue.model.BooleanColumnStatisticsData;
-import com.amazonaws.services.glue.model.ColumnStatisticsType;
-import com.amazonaws.services.glue.model.DateColumnStatisticsData;
-import com.amazonaws.services.glue.model.DecimalColumnStatisticsData;
-import com.amazonaws.services.glue.model.DoubleColumnStatisticsData;
-import com.amazonaws.services.glue.model.LongColumnStatisticsData;
-import com.amazonaws.services.glue.model.StringColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-public class HiveToCatalogConverter {
-
-  public static com.amazonaws.services.glue.model.Database convertDatabase(Database hiveDatabase) {
-    com.amazonaws.services.glue.model.Database catalogDatabase = new com.amazonaws.services.glue.model.Database();
-    catalogDatabase.setName(hiveDatabase.getName());
-    catalogDatabase.setDescription(hiveDatabase.getDescription());
-    catalogDatabase.setLocationUri(hiveDatabase.getLocationUri());
-    catalogDatabase.setParameters(hiveDatabase.getParameters());
-    return catalogDatabase;
-  }
-
-  public static com.amazonaws.services.glue.model.Table convertTable(
-          Table hiveTable) {
-    com.amazonaws.services.glue.model.Table catalogTable = new com.amazonaws.services.glue.model.Table();
-    catalogTable.setRetention(hiveTable.getRetention());
-    catalogTable.setPartitionKeys(convertFieldSchemaList(hiveTable.getPartitionKeys()));
-    catalogTable.setTableType(hiveTable.getTableType());
-    catalogTable.setName(hiveTable.getTableName());
-    catalogTable.setOwner(hiveTable.getOwner());
-    catalogTable.setCreateTime(new Date((long) hiveTable.getCreateTime() * 1000));
-    catalogTable.setLastAccessTime(new Date((long) hiveTable.getLastAccessTime() * 1000));
-    catalogTable.setStorageDescriptor(convertStorageDescriptor(hiveTable.getSd()));
-    catalogTable.setParameters(hiveTable.getParameters());
-    catalogTable.setViewExpandedText(hiveTable.getViewExpandedText());
-    catalogTable.setViewOriginalText(hiveTable.getViewOriginalText());
-
-    return catalogTable;
-  }
-
-  public static com.amazonaws.services.glue.model.StorageDescriptor convertStorageDescriptor(
-          StorageDescriptor hiveSd) {
-    com.amazonaws.services.glue.model.StorageDescriptor catalogSd =
-            new com.amazonaws.services.glue.model.StorageDescriptor();
-    catalogSd.setNumberOfBuckets(hiveSd.getNumBuckets());
-    catalogSd.setCompressed(hiveSd.isCompressed());
-    catalogSd.setParameters(hiveSd.getParameters());
-    catalogSd.setBucketColumns(hiveSd.getBucketCols());
-    catalogSd.setColumns(convertFieldSchemaList(hiveSd.getCols()));
-    catalogSd.setInputFormat(hiveSd.getInputFormat());
-    catalogSd.setLocation(hiveSd.getLocation());
-    catalogSd.setOutputFormat(hiveSd.getOutputFormat());
-    catalogSd.setSerdeInfo(convertSerDeInfo(hiveSd.getSerdeInfo()));
-    catalogSd.setSkewedInfo(convertSkewedInfo(hiveSd.getSkewedInfo()));
-    catalogSd.setSortColumns(convertOrderList(hiveSd.getSortCols()));
-    catalogSd.setStoredAsSubDirectories(hiveSd.isStoredAsSubDirectories());
-
-    return catalogSd;
-  }
-
-  public static com.amazonaws.services.glue.model.Column convertFieldSchema(
-          FieldSchema hiveFieldSchema) {
-    com.amazonaws.services.glue.model.Column catalogFieldSchema =
-            new com.amazonaws.services.glue.model.Column();
-    catalogFieldSchema.setComment(hiveFieldSchema.getComment());
-    catalogFieldSchema.setName(hiveFieldSchema.getName());
-    catalogFieldSchema.setType(hiveFieldSchema.getType());
-
-    return catalogFieldSchema;
-  }
-
-  public static List<com.amazonaws.services.glue.model.Column> convertFieldSchemaList(
-          List<FieldSchema> hiveFieldSchemaList) {
-    List<com.amazonaws.services.glue.model.Column> catalogFieldSchemaList =
-            new ArrayList<com.amazonaws.services.glue.model.Column>();
-    for (FieldSchema hiveFs : hiveFieldSchemaList){
-      catalogFieldSchemaList.add(convertFieldSchema(hiveFs));
-    }
-
-    return catalogFieldSchemaList;
-  }
-
-  public static com.amazonaws.services.glue.model.SerDeInfo convertSerDeInfo(
-          SerDeInfo hiveSerDeInfo) {
-    com.amazonaws.services.glue.model.SerDeInfo catalogSerDeInfo = new com.amazonaws.services.glue.model.SerDeInfo();
-    catalogSerDeInfo.setName(hiveSerDeInfo.getName());
-    catalogSerDeInfo.setParameters(hiveSerDeInfo.getParameters());
-    catalogSerDeInfo.setSerializationLibrary(hiveSerDeInfo.getSerializationLib());
-
-    return catalogSerDeInfo;
-  }
-
-  public static com.amazonaws.services.glue.model.SkewedInfo convertSkewedInfo(SkewedInfo hiveSkewedInfo) {
-    if (hiveSkewedInfo == null) {
-      return null;
-    }
-    com.amazonaws.services.glue.model.SkewedInfo catalogSkewedInfo = new com.amazonaws.services.glue.model.SkewedInfo()
-            .withSkewedColumnNames(hiveSkewedInfo.getSkewedColNames())
-            .withSkewedColumnValues(convertSkewedValue(hiveSkewedInfo.getSkewedColValues()))
-            .withSkewedColumnValueLocationMaps(convertSkewedMap(hiveSkewedInfo.getSkewedColValueLocationMaps()));
-    return catalogSkewedInfo;
-  }
-
-  public static com.amazonaws.services.glue.model.Order convertOrder(Order hiveOrder) {
-    com.amazonaws.services.glue.model.Order order = new com.amazonaws.services.glue.model.Order();
-    order.setColumn(hiveOrder.getCol());
-    order.setSortOrder(hiveOrder.getOrder());
-
-    return order;
-  }
-
-  public static List<com.amazonaws.services.glue.model.Order> convertOrderList(List<Order> hiveOrderList) {
-    if (hiveOrderList == null) {
-      return null;
-    }
-    List<com.amazonaws.services.glue.model.Order> catalogOrderList = new ArrayList<>();
-    for (Order hiveOrder : hiveOrderList) {
-      catalogOrderList.add(convertOrder(hiveOrder));
-    }
-
-    return catalogOrderList;
-  }
-
-  public static com.amazonaws.services.glue.model.Partition convertPartition(Partition src) {
-    com.amazonaws.services.glue.model.Partition tgt = new com.amazonaws.services.glue.model.Partition();
-
-    tgt.setDatabaseName(src.getDbName());
-    tgt.setTableName(src.getTableName());
-    tgt.setCreationTime(new Date((long) src.getCreateTime() * 1000));
-    tgt.setLastAccessTime(new Date((long) src.getLastAccessTime() * 1000));
-    tgt.setParameters(src.getParameters());
-    tgt.setStorageDescriptor(convertStorageDescriptor(src.getSd()));
-    tgt.setValues(src.getValues());
-
-    return tgt;
-  }
-
-  public static String convertListToString(final List<String> list) {
-    if (list == null) {
-      return null;
-    }
-    StringBuilder sb = new StringBuilder();
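-    // Each element becomes "<length>$<value>" (decoded by convertStringToList in
-    // BaseCatalogToHiveConverter), so values may safely contain '$' themselves.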
-    for (int i = 0; i < list.size(); i++) {
-      String currentString = list.get(i);
-      sb.append(currentString.length() + "$" + currentString);
-    }
-
-    return sb.toString();
-  }
-
-  public static Map<String, String> convertSkewedMap(final Map<List<String>, String> coreSkewedMap){
-    if (coreSkewedMap == null){
-      return null;
-    }
-    Map<String, String> catalogSkewedMap = new HashMap<>();
-    for (List<String> coreKey : coreSkewedMap.keySet()) {
-      catalogSkewedMap.put(convertListToString(coreKey), coreSkewedMap.get(coreKey));
-    }
-    return catalogSkewedMap;
-  }
-
-  public static List<String> convertSkewedValue(final List<List<String>> coreSkewedValue) {
-    if (coreSkewedValue == null) {
-      return null;
-    }
-    List<String> catalogSkewedValue = new ArrayList<>();
-    for (int i = 0; i < coreSkewedValue.size(); i++) {
-      catalogSkewedValue.add(convertListToString(coreSkewedValue.get(i)));
-    }
-
-    return catalogSkewedValue;
-  }
-
-  public static com.amazonaws.services.glue.model.UserDefinedFunction convertFunction(final Function hiveFunction) {
-    if (hiveFunction == null) {
-      return null;
-    }
-    com.amazonaws.services.glue.model.UserDefinedFunction catalogFunction = new com.amazonaws.services.glue.model.UserDefinedFunction();
-    catalogFunction.setClassName(hiveFunction.getClassName());
-    catalogFunction.setFunctionName(hiveFunction.getFunctionName());
-    catalogFunction.setCreateTime(new Date((long) (hiveFunction.getCreateTime()) * 1000));
-    catalogFunction.setOwnerName(hiveFunction.getOwnerName());
-    if (hiveFunction.getOwnerType() != null) {
-      catalogFunction.setOwnerType(hiveFunction.getOwnerType().name());
-    }
-    catalogFunction.setResourceUris(covertResourceUriList(hiveFunction.getResourceUris()));
-    return catalogFunction;
-  }
-
-  public static List<com.amazonaws.services.glue.model.ResourceUri> covertResourceUriList(
-          final List<ResourceUri> hiveResourceUriList) {
-    if (hiveResourceUriList == null) {
-      return null;
-    }
-    List<com.amazonaws.services.glue.model.ResourceUri> catalogResourceUriList = new ArrayList<>();
-    for (ResourceUri hiveResourceUri : hiveResourceUriList) {
-      com.amazonaws.services.glue.model.ResourceUri catalogResourceUri = new com.amazonaws.services.glue.model.ResourceUri();
-      catalogResourceUri.setUri(hiveResourceUri.getUri());
-      if (hiveResourceUri.getResourceType() != null) {
-        catalogResourceUri.setResourceType(hiveResourceUri.getResourceType().name());
-      }
-      catalogResourceUriList.add(catalogResourceUri);
-    }
-    return catalogResourceUriList;
-  }
-
-  public static List<com.amazonaws.services.glue.model.ColumnStatistics> convertColumnStatisticsObjList(
-          ColumnStatistics hiveColumnStatistics) {
-    ColumnStatisticsDesc hiveColumnStatisticsDesc = hiveColumnStatistics.getStatsDesc();
-    List<ColumnStatisticsObj> hiveColumnStatisticsObjs = hiveColumnStatistics.getStatsObj();
-
-    List<com.amazonaws.services.glue.model.ColumnStatistics> catalogColumnStatisticsList = new ArrayList<>();
-    for (ColumnStatisticsObj hiveColumnStatisticsObj : hiveColumnStatisticsObjs) {
-      com.amazonaws.services.glue.model.ColumnStatistics catalogColumnStatistics =
-              new com.amazonaws.services.glue.model.ColumnStatistics();
-      catalogColumnStatistics.setColumnName(hiveColumnStatisticsObj.getColName());
-      catalogColumnStatistics.setColumnType(hiveColumnStatisticsObj.getColType());
-      // Last analyzed time in Hive is in days since Epoch, Java Date is in milliseconds
-      catalogColumnStatistics.setAnalyzedTime(new Date(TimeUnit.DAYS.toMillis(hiveColumnStatisticsDesc.getLastAnalyzed())));
-      catalogColumnStatistics.setStatisticsData(convertColumnStatisticsData(hiveColumnStatisticsObj.getStatsData()));
-      catalogColumnStatisticsList.add(catalogColumnStatistics);
-    }
-
-    return catalogColumnStatisticsList;
-  }
-
-  private static com.amazonaws.services.glue.model.ColumnStatisticsData convertColumnStatisticsData(
-          ColumnStatisticsData hiveColumnStatisticsData) {
-    com.amazonaws.services.glue.model.ColumnStatisticsData catalogColumnStatisticsData =
-            new com.amazonaws.services.glue.model.ColumnStatisticsData();
-
-    // Hive uses the TUnion object to ensure that only one stats object is set at any time; this means that we can
-    // only call the get*() of a stats type if the 'setField' is set to that value.
-    ColumnStatisticsData._Fields setField = hiveColumnStatisticsData.getSetField();
-    switch (setField) {
-      case BINARY_STATS:
-        BinaryColumnStatsData hiveBinaryData = hiveColumnStatisticsData.getBinaryStats();
-        BinaryColumnStatisticsData catalogBinaryData = new BinaryColumnStatisticsData();
-        catalogBinaryData.setNumberOfNulls(hiveBinaryData.getNumNulls());
-        catalogBinaryData.setMaximumLength(hiveBinaryData.getMaxColLen());
-        catalogBinaryData.setAverageLength(hiveBinaryData.getAvgColLen());
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.BINARY));
-        catalogColumnStatisticsData.setBinaryColumnStatisticsData(catalogBinaryData);
-        break;
-
-      case BOOLEAN_STATS:
-        BooleanColumnStatsData hiveBooleanData = hiveColumnStatisticsData.getBooleanStats();
-        BooleanColumnStatisticsData catalogBooleanData = new BooleanColumnStatisticsData();
-        catalogBooleanData.setNumberOfNulls(hiveBooleanData.getNumNulls());
-        catalogBooleanData.setNumberOfFalses(hiveBooleanData.getNumFalses());
-        catalogBooleanData.setNumberOfTrues(hiveBooleanData.getNumTrues());
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.BOOLEAN));
-        catalogColumnStatisticsData.setBooleanColumnStatisticsData(catalogBooleanData);
-        break;
-
-      case DATE_STATS:
-        DateColumnStatsData hiveDateData = hiveColumnStatisticsData.getDateStats();
-        DateColumnStatisticsData catalogDateData = new DateColumnStatisticsData();
-        catalogDateData.setNumberOfNulls(hiveDateData.getNumNulls());
-        catalogDateData.setNumberOfDistinctValues(hiveDateData.getNumDVs());
-        catalogDateData.setMaximumValue(ConverterUtils.hiveDatetoDate(hiveDateData.getHighValue()));
-        catalogDateData.setMinimumValue(ConverterUtils.hiveDatetoDate(hiveDateData.getLowValue()));
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.DATE));
-        catalogColumnStatisticsData.setDateColumnStatisticsData(catalogDateData);
-        break;
-
-      case DECIMAL_STATS:
-        DecimalColumnStatsData hiveDecimalData = hiveColumnStatisticsData.getDecimalStats();
-        DecimalColumnStatisticsData catalogDecimalData = new DecimalColumnStatisticsData();
-        catalogDecimalData.setNumberOfNulls(hiveDecimalData.getNumNulls());
-        catalogDecimalData.setNumberOfDistinctValues(hiveDecimalData.getNumDVs());
-        catalogDecimalData.setMaximumValue(convertDecimal(hiveDecimalData.getHighValue()));
-        catalogDecimalData.setMinimumValue(convertDecimal(hiveDecimalData.getLowValue()));
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.DECIMAL));
-        catalogColumnStatisticsData.setDecimalColumnStatisticsData(catalogDecimalData);
-        break;
-
-      case DOUBLE_STATS:
-        DoubleColumnStatsData hiveDoubleData = hiveColumnStatisticsData.getDoubleStats();
-        DoubleColumnStatisticsData catalogDoubleData = new DoubleColumnStatisticsData();
-        catalogDoubleData.setNumberOfNulls(hiveDoubleData.getNumNulls());
-        catalogDoubleData.setNumberOfDistinctValues(hiveDoubleData.getNumDVs());
-        catalogDoubleData.setMaximumValue(hiveDoubleData.getHighValue());
-        catalogDoubleData.setMinimumValue(hiveDoubleData.getLowValue());
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.DOUBLE));
-        catalogColumnStatisticsData.setDoubleColumnStatisticsData(catalogDoubleData);
-        break;
-      case LONG_STATS:
-        LongColumnStatsData hiveLongData = hiveColumnStatisticsData.getLongStats();
-        LongColumnStatisticsData catalogLongData = new LongColumnStatisticsData();
-        catalogLongData.setNumberOfNulls(hiveLongData.getNumNulls());
-        catalogLongData.setNumberOfDistinctValues(hiveLongData.getNumDVs());
-        catalogLongData.setMaximumValue(hiveLongData.getHighValue());
-        catalogLongData.setMinimumValue(hiveLongData.getLowValue());
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.LONG));
-        catalogColumnStatisticsData.setLongColumnStatisticsData(catalogLongData);
-        break;
-
-      case STRING_STATS:
-        StringColumnStatsData hiveStringData = hiveColumnStatisticsData.getStringStats();
-        StringColumnStatisticsData catalogStringData = new StringColumnStatisticsData();
-        catalogStringData.setNumberOfNulls(hiveStringData.getNumNulls());
-        catalogStringData.setNumberOfDistinctValues(hiveStringData.getNumDVs());
-        catalogStringData.setMaximumLength(hiveStringData.getMaxColLen());
-        catalogStringData.setAverageLength(hiveStringData.getAvgColLen());
-        catalogColumnStatisticsData.setType(String.valueOf(ColumnStatisticsType.STRING));
-        catalogColumnStatisticsData.setStringColumnStatisticsData(catalogStringData);
-        break;
-    }
-
-    return catalogColumnStatisticsData;
-  }
-
-  private static com.amazonaws.services.glue.model.DecimalNumber convertDecimal(Decimal hiveDecimal) {
-    com.amazonaws.services.glue.model.DecimalNumber catalogDecimal =
-            new com.amazonaws.services.glue.model.DecimalNumber();
-    catalogDecimal.setUnscaledValue(ByteBuffer.wrap(hiveDecimal.getUnscaled()));
-    catalogDecimal.setScale((int)hiveDecimal.getScale());
-    return catalogDecimal;
-  }
-
-}
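
For readers skimming this removal: the deleted converter serialized skewed-column keys with a length-prefixed "<length>$<value>" encoding (see convertListToString above). The sketch below is illustrative only; the decoder is an assumption of mine, since the deleted file shipped only the encoder, and the class name SkewedListCodec is hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SkewedListCodec {

    // Mirrors the removed encoder: each element becomes length + '$' + value.
    public static String encode(List<String> list) {
        if (list == null) {
            return null;
        }
        StringBuilder sb = new StringBuilder();
        for (String s : list) {
            sb.append(s.length()).append('$').append(s);
        }
        return sb.toString();
    }

    // Hypothetical inverse: the length prefix tells us where each value ends,
    // so values containing '$' or '/' decode unambiguously.
    public static List<String> decode(String encoded) {
        List<String> out = new ArrayList<>();
        int i = 0;
        while (i < encoded.length()) {
            int sep = encoded.indexOf('$', i);
            int len = Integer.parseInt(encoded.substring(i, sep));
            out.add(encoded.substring(sep + 1, sep + 1 + len));
            i = sep + 1 + len;
        }
        return out;
    }

    public static void main(String[] args) {
        String enc = encode(Arrays.asList("a", "b=c"));   // "1$a3$b=c"
        System.out.println(enc + " -> " + decode(enc));   // [a, b=c]
    }
}

The length prefix is what makes the format reversible without escaping: a reader never has to guess whether a '$' belongs to a value or to the framing.
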
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/PartitionNameParser.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/PartitionNameParser.java
deleted file mode 100644
index 1419896a58..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/converters/PartitionNameParser.java
+++ /dev/null
@@ -1,143 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.converters;
-
-import com.amazonaws.glue.catalog.exceptions.InvalidPartitionNameException;
-import com.google.common.collect.ImmutableSet;
-
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-public class PartitionNameParser {
-
-  private static final Pattern PARTITION_NAME_VALUE_PATTERN = Pattern.compile("([^/]+)=([^/]+)");
-  private static final String PARTITION_NAME_DELIMITER = "/";
-
-  private static final char STORE_AS_NUMBER = 'n';
-  private static final char STORE_AS_STRING = 's';
-
-  private static final Set<String> NUMERIC_PARTITION_COLUMN_TYPES = ImmutableSet.of(
-          "tinyint",
-          "smallint",
-          "int",
-          "bigint"
-  );
-
-  public static String getPartitionName(List<String> partitionColumns, List<String> partitionValues) {
-    if (hasInvalidValues(partitionColumns, partitionValues) || hasInvalidSize(partitionColumns, partitionValues)) {
-      throw new IllegalArgumentException("Partition is not well formed. Columns and values do not match.");
-    }
-
-    StringBuilder partitionName = new StringBuilder();
-    partitionName.append(getPartitionColumnName(partitionColumns.get(0), partitionValues.get(0)));
-
-    for (int i = 1; i < partitionColumns.size(); i++) {
-      partitionName.append(PARTITION_NAME_DELIMITER);
-      partitionName.append(getPartitionColumnName(partitionColumns.get(i), partitionValues.get(i)));
-    }
-
-    return partitionName.toString();
-  }
-
-  private static boolean hasInvalidValues(List<String> partitionColumns, List<String> partitionValues) {
-    return partitionColumns == null || partitionValues == null;
-  }
-
-  private static boolean hasInvalidSize(List<String> partitionColumns, List<String> partitionValues) {
-    return partitionColumns.size() != partitionValues.size();
-  }
-
-  private static String getPartitionColumnName(String partitionColumn, String partitionValue) {
-    return partitionColumn + "=" + partitionValue;
-  }
-
-  public static LinkedHashMap<String, String> getPartitionColumns(String partitionName) {
-    LinkedHashMap<String, String> partitionColumns = new LinkedHashMap<>();
-    String[] partitions = partitionName.split(PARTITION_NAME_DELIMITER);
-    for(String partition : partitions) {
-      Entry<String, String> entry = getPartitionColumnValuePair(partition);
-      partitionColumns.put(entry.getKey(), entry.getValue());
-    }
-
-    return partitionColumns;
-  }
-
-  /*
-   * Copied from https://github.com/apache/hive/blob/master/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
-   */
-  public static String unescapePathName(String path) {
-    int len = path.length();
-    //pre-allocate sb to have enough buffer size, to avoid realloc
-    StringBuilder sb = new StringBuilder(len);
-    for (int i = 0; i < len; i++) {
-      char c = path.charAt(i);
-      if (c == '%' && i + 2 < len) {
-        int code = -1;
-        try {
-          code = Integer.parseInt(path.substring(i + 1, i + 3), 16);
-        } catch (Exception e) {
-          code = -1;
-        }
-        if (code >= 0) {
-          sb.append((char) code);
-          i += 2;
-          continue;
-        }
-      }
-      sb.append(c);
-    }
-    return sb.toString();
-  }
-
-  private static AbstractMap.SimpleEntry getPartitionColumnValuePair(String partition) {
-    String column = null;
-    String value = null;
-    Matcher partitionMatcher = PARTITION_NAME_VALUE_PATTERN.matcher(partition);
-
-    if (partitionMatcher.matches()) {
-      column = unescapePathName(partitionMatcher.group(1));
-      value = unescapePathName(partitionMatcher.group(2));
-    } else {
-      throw new InvalidPartitionNameException(partition);
-    }
-
-    return new AbstractMap.SimpleEntry(column, value);
-  }
-
-  public static List<String> getPartitionValuesFromName(String partitionName) {
-    List<String> partitionValues = new ArrayList<>();
-    String[] partitions = partitionName.split(PARTITION_NAME_DELIMITER);
-    for(String partition : partitions) {
-      Entry<String, String> entry = getPartitionColumnValuePair(partition);
-      partitionValues.add(entry.getValue());
-    }
-
-    return partitionValues;
-  }
-
-}
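
The class removed above round-trips Hive partition names of the form "col1=val1/col2=val2", unescaping %XX sequences on the parse side. Below is a minimal standalone sketch of that round trip, assuming the same regex and unescaping rules as the deleted code; the class name PartitionNameDemo is hypothetical.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PartitionNameDemo {

    private static final Pattern KV = Pattern.compile("([^/]+)=([^/]+)");

    // Same shape as the removed getPartitionName: "col=val" joined by '/'.
    public static String toName(List<String> cols, List<String> vals) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < cols.size(); i++) {
            if (i > 0) {
                sb.append('/');
            }
            sb.append(cols.get(i)).append('=').append(vals.get(i));
        }
        return sb.toString();
    }

    // Same shape as the removed getPartitionColumns: split on '/', match
    // "key=value", unescape both sides.
    public static LinkedHashMap<String, String> fromName(String name) {
        LinkedHashMap<String, String> out = new LinkedHashMap<>();
        for (String part : name.split("/")) {
            Matcher m = KV.matcher(part);
            if (!m.matches()) {
                throw new IllegalArgumentException("Invalid partition: " + part);
            }
            out.put(unescape(m.group(1)), unescape(m.group(2)));
        }
        return out;
    }

    // The %XX decoding, conceptually the same as Hive's FileUtils.unescapePathName.
    private static String unescape(String path) {
        StringBuilder sb = new StringBuilder(path.length());
        for (int i = 0; i < path.length(); i++) {
            char c = path.charAt(i);
            if (c == '%' && i + 2 < path.length()) {
                try {
                    sb.append((char) Integer.parseInt(path.substring(i + 1, i + 3), 16));
                    i += 2;
                    continue;
                } catch (NumberFormatException ignored) {
                    // not a valid escape; keep the literal '%'
                }
            }
            sb.append(c);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        String name = toName(Arrays.asList("year", "city"),
                Arrays.asList("2023", "new%20york"));
        // Prints: year=2023/city=new%20york -> {year=2023, city=new york}
        System.out.println(name + " -> " + fromName(name));
    }
}

Note that, like the deleted code, the build side does not escape values; callers are expected to pass already-escaped values, which is why only the parse side unescapes.
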
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProvider.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProvider.java
deleted file mode 100644
index dd9f480397..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProvider.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.amazonaws.glue.catalog.credentials;
-
-import com.amazonaws.SdkClientException;
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.auth.BasicSessionCredentials;
-import com.amazonaws.glue.catalog.util.AWSGlueConfig;
-import com.amazonaws.util.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-
-public class ConfigurationAWSCredentialsProvider implements AWSCredentialsProvider {
-
-    private Configuration conf;
-
-    public ConfigurationAWSCredentialsProvider(Configuration conf) {
-        this.conf = conf;
-    }
-
-    @Override
-    public AWSCredentials getCredentials() {
-        String accessKey = StringUtils.trim(conf.get(AWSGlueConfig.AWS_GLUE_ACCESS_KEY));
-        String secretKey = StringUtils.trim(conf.get(AWSGlueConfig.AWS_GLUE_SECRET_KEY));
-        String sessionToken = StringUtils.trim(conf.get(AWSGlueConfig.AWS_GLUE_SESSION_TOKEN));
-        if (!StringUtils.isNullOrEmpty(accessKey) && !StringUtils.isNullOrEmpty(secretKey)) {
-            return (StringUtils.isNullOrEmpty(sessionToken) ? new BasicAWSCredentials(accessKey,
-                    secretKey) : new BasicSessionCredentials(accessKey, secretKey, sessionToken));
-        } else {
-            throw new SdkClientException(
-                    "Unable to load AWS credentials from hive conf (aws.glue.access-key and aws.glue.secret-key)");
-        }
-    }
-
-    @Override
-    public void refresh() {
-
-    }
-
-    @Override
-    public String toString() {
-        return this.getClass().getSimpleName();
-    }
-}
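
For context, the provider removed above was wired in through Hive configuration keys rather than the default AWS provider chain. A hedged usage sketch (the key strings come from the exception message in getCredentials; the class name CredentialsDemo and the values are illustrative):

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.glue.catalog.credentials.ConfigurationAWSCredentialsProvider;
import org.apache.hadoop.conf.Configuration;

public class CredentialsDemo {
    public static void main(String[] args) {
        // Static keys supplied via the Hive/Hadoop Configuration.
        Configuration conf = new Configuration(false);
        conf.set("aws.glue.access-key", "AKIA...");        // placeholder value
        conf.set("aws.glue.secret-key", "not-a-real-key"); // placeholder value

        AWSCredentials creds =
                new ConfigurationAWSCredentialsProvider(conf).getCredentials();
        System.out.println(creds.getAWSAccessKeyId());
    }
}

If a session token is also configured, the provider returns BasicSessionCredentials instead of BasicAWSCredentials, as the branch in getCredentials shows.
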
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProviderFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProviderFactory.java
deleted file mode 100644
index c1c526b815..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/credentials/ConfigurationAWSCredentialsProviderFactory.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.amazonaws.glue.catalog.credentials;
-
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.glue.catalog.metastore.AWSCredentialsProviderFactory;
-import org.apache.hadoop.conf.Configuration;
-
-public class ConfigurationAWSCredentialsProviderFactory implements AWSCredentialsProviderFactory {
-    @Override
-    public AWSCredentialsProvider buildAWSCredentialsProvider(Configuration conf) {
-        return new ConfigurationAWSCredentialsProvider(conf);
-    }
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/InvalidPartitionNameException.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/InvalidPartitionNameException.java
deleted file mode 100644
index c2870dd2c1..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/InvalidPartitionNameException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.exceptions;
-
-public class InvalidPartitionNameException extends RuntimeException {
-
-  public InvalidPartitionNameException(String message) {
-    super(message);
-  }
-
-  public InvalidPartitionNameException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/LakeFormationException.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/LakeFormationException.java
deleted file mode 100644
index 25fe259769..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/exceptions/LakeFormationException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.exceptions;
-
-public class LakeFormationException extends RuntimeException {
-
-  public LakeFormationException(String message) {
-    super(message);
-  }
-
-  public LakeFormationException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java
deleted file mode 100644
index 23d0ee9aaa..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java
+++ /dev/null
@@ -1,2481 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.glue.catalog.converters.CatalogToHiveConverter;
-import com.amazonaws.glue.catalog.converters.GlueInputConverter;
-import com.amazonaws.glue.catalog.converters.Hive3CatalogToHiveConverter;
-import com.amazonaws.glue.catalog.util.BatchDeletePartitionsHelper;
-import com.amazonaws.glue.catalog.util.ExpressionHelper;
-import com.amazonaws.glue.catalog.util.LoggingHelper;
-import com.amazonaws.glue.catalog.util.MetastoreClientUtils;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.isExternalTable;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.AlreadyExistsException;
-import com.amazonaws.services.glue.model.EntityNotFoundException;
-import com.amazonaws.services.glue.model.GetDatabaseRequest;
-import com.amazonaws.services.glue.model.Partition;
-import com.amazonaws.services.glue.model.UpdatePartitionRequest;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import static com.google.common.base.Preconditions.checkNotNull;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
-import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.utils.ObjectPair;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.net.URI;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.regex.Pattern;
-
-public class AWSCatalogMetastoreClient implements IMetaStoreClient {
-
-  // TODO "hook" into Hive logging (hive or hive.metastore)
-  private static final Logger logger = Logger.getLogger(AWSCatalogMetastoreClient.class);
-
-  private final Configuration conf;
-  private final AWSGlue glueClient;
-  private final Warehouse wh;
-  private final GlueMetastoreClientDelegate glueMetastoreClientDelegate;
-  private final String catalogId;
-  private final CatalogToHiveConverter catalogToHiveConverter;
-
-  private static final int BATCH_DELETE_PARTITIONS_PAGE_SIZE = 25;
-  private static final int BATCH_DELETE_PARTITIONS_THREADS_COUNT = 5;
-  static final String BATCH_DELETE_PARTITIONS_THREAD_POOL_NAME_FORMAT = "batch-delete-partitions-%d";
-  private static final ExecutorService BATCH_DELETE_PARTITIONS_THREAD_POOL = Executors.newFixedThreadPool(
-          BATCH_DELETE_PARTITIONS_THREADS_COUNT,
-          new ThreadFactoryBuilder()
-                  .setNameFormat(BATCH_DELETE_PARTITIONS_THREAD_POOL_NAME_FORMAT)
-                  .setDaemon(true).build()
-  );
-
-  private Map<String, String> currentMetaVars;
-  // private final AwsGlueHiveShims hiveShims = ShimsLoader.getHiveShims();
-
-  public AWSCatalogMetastoreClient(Configuration conf, HiveMetaHookLoader hook, Boolean allowEmbedded)
-          throws MetaException {
-    this(conf, hook);
-  }
-
-  public AWSCatalogMetastoreClient(Configuration conf, HiveMetaHookLoader hook) throws MetaException {
-    this.conf = conf;
-    glueClient = new AWSGlueClientFactory(this.conf).newClient();
-    catalogToHiveConverter = new Hive3CatalogToHiveConverter();
-
-    // TODO preserve existing functionality for HiveMetaHook
-    wh = new Warehouse(this.conf);
-
-    AWSGlueMetastore glueMetastore = new AWSGlueMetastoreFactory().newMetastore(conf);
-    glueMetastoreClientDelegate = new GlueMetastoreClientDelegate(this.conf, glueMetastore, wh);
-
-    snapshotActiveConf();
-    if (!doesDefaultDBExist()) {
-      createDefaultDatabase();
-    }
-    catalogId = MetastoreClientUtils.getCatalogId(conf);
-  }
-
-  /**
-   * Currently used for unit tests
-   */
-  public static class Builder {
-
-    private Configuration conf;
-    private Warehouse wh;
-    private GlueClientFactory clientFactory;
-    private AWSGlueMetastoreFactory metastoreFactory;
-    private boolean createDefaults = true;
-    private String catalogId;
-    private GlueMetastoreClientDelegate glueMetastoreClientDelegate;
-
-    public Builder withConf(Configuration conf) {
-      this.conf = conf;
-      return this;
-    }
-
-    public Builder withClientFactory(GlueClientFactory clientFactory) {
-      this.clientFactory = clientFactory;
-      return this;
-    }
-
-    public Builder withMetastoreFactory(AWSGlueMetastoreFactory metastoreFactory) {
-      this.metastoreFactory = metastoreFactory;
-      return this;
-    }
-
-    public Builder withWarehouse(Warehouse wh) {
-      this.wh = wh;
-      return this;
-    }
-
-    public Builder withCatalogId(String catalogId) {
-      this.catalogId = catalogId;
-      return this;
-    }
-
-    public Builder withGlueMetastoreClientDelegate(GlueMetastoreClientDelegate clientDelegate) {
-      this.glueMetastoreClientDelegate = clientDelegate;
-      return this;
-    }
-
-    public AWSCatalogMetastoreClient build() throws MetaException {
-      return new AWSCatalogMetastoreClient(this);
-    }
-
-    public Builder createDefaults(boolean createDefaultDB) {
-      this.createDefaults = createDefaultDB;
-      return this;
-    }
-  }
-
-  private AWSCatalogMetastoreClient(Builder builder) throws MetaException {
-    catalogToHiveConverter = new Hive3CatalogToHiveConverter();
-    conf = MoreObjects.firstNonNull(builder.conf, MetastoreConf.newMetastoreConf());
-
-    if (builder.wh != null) {
-      this.wh = builder.wh;
-    } else {
-      this.wh = new Warehouse(conf);
-    }
-
-    if (builder.catalogId != null) {
-      this.catalogId = builder.catalogId;
-    } else {
-      this.catalogId = null;
-    }
-
-    GlueClientFactory clientFactory = MoreObjects.firstNonNull(builder.clientFactory, new AWSGlueClientFactory(conf));
-    AWSGlueMetastoreFactory metastoreFactory = MoreObjects.firstNonNull(builder.metastoreFactory,
-            new AWSGlueMetastoreFactory());
-
-    glueClient = clientFactory.newClient();
-    AWSGlueMetastore glueMetastore = metastoreFactory.newMetastore(conf);
-    glueMetastoreClientDelegate = new GlueMetastoreClientDelegate(this.conf, glueMetastore, wh);
-
-    /**
-     * It seems weird to create databases as part of client construction. This
-     * part should probably be moved to the section in hive code right after the
-     * metastore client is instantiated. For now, simply copying the
-     * functionality in the thrift server
-     */
-    if(builder.createDefaults && !doesDefaultDBExist()) {
-      createDefaultDatabase();
-    }
-  }
-
-  private boolean doesDefaultDBExist() throws MetaException {
-
-    try {
-      GetDatabaseRequest getDatabaseRequest = new GetDatabaseRequest().withName(DEFAULT_DATABASE_NAME).withCatalogId(
-              catalogId);
-      glueClient.getDatabase(getDatabaseRequest);
-    } catch (EntityNotFoundException e) {
-      return false;
-    } catch (AmazonServiceException e) {
-      String msg = "Unable to verify existence of default database: ";
-      logger.error(msg, e);
-      throw new MetaException(msg + e);
-    }
-    return true;
-  }
-
-  private void createDefaultDatabase() throws MetaException {
-    Database defaultDB = new Database();
-    defaultDB.setName(DEFAULT_DATABASE_NAME);
-    defaultDB.setDescription(DEFAULT_DATABASE_COMMENT);
-    defaultDB.setLocationUri(wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString());
-
-    org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet principalPrivilegeSet
-            = new org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet();
-    principalPrivilegeSet.setRolePrivileges(Maps.<String, List<org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo>>newHashMap());
-
-    defaultDB.setPrivileges(principalPrivilegeSet);
-
-    /**
-     * TODO: Grant access to role PUBLIC after role support is added
-     */
-    try {
-      createDatabase(defaultDB);
-    } catch (org.apache.hadoop.hive.metastore.api.AlreadyExistsException e) {
-      logger.warn("database - default already exists. Ignoring..");
-    } catch (Exception e) {
-      logger.error("Unable to create default database", e);
-    }
-  }
-
-  @Override
-  public void createDatabase(Database database) throws InvalidObjectException,
-          org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException, TException {
-    glueMetastoreClientDelegate.createDatabase(database);
-  }
-
-  @Override
-  public Database getDatabase(String name) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getDatabase(name);
-  }
-
-  @Override
-  public Database getDatabase(String catalogName, String dbName) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getDatabase(dbName);
-  }
-
-  @Override
-  public List<String> getDatabases(String pattern) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getDatabases(pattern);
-  }
-
-  @Override
-  public List<String> getDatabases(String catalogName, String dbPattern) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getDatabases(dbPattern);
-  }
-
-  @Override
-  public List<String> getAllDatabases() throws MetaException, TException {
-    return getDatabases(".*");
-  }
-
-  @Override
-  public List<String> getAllDatabases(String catalogName) throws MetaException, TException {
-    return getDatabases(".*");
-  }
-
-  @Override
-  public void alterDatabase(String databaseName, Database database) throws NoSuchObjectException, MetaException,
-          TException {
-    glueMetastoreClientDelegate.alterDatabase(databaseName, database);
-  }
-
-  @Override
-  public void alterDatabase(String catalogName, String databaseName, Database database) throws NoSuchObjectException, MetaException, TException {
-    glueMetastoreClientDelegate.alterDatabase(databaseName, database);
-  }
-
-  @Override
-  public void dropDatabase(String name) throws NoSuchObjectException, InvalidOperationException, MetaException,
-          TException {
-    dropDatabase(name, true, false, false);
-  }
-
-  @Override
-  public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) throws NoSuchObjectException,
-          InvalidOperationException, MetaException, TException {
-    dropDatabase(name, deleteData, ignoreUnknownDb, false);
-  }
-
-  @Override
-  public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
-          throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.dropDatabase(name, deleteData, ignoreUnknownDb, cascade);
-  }
-
-  @Override
-  public void dropDatabase(String catalogName, String dbName, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.dropDatabase(dbName, deleteData, ignoreUnknownDb, cascade);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition add_partition(org.apache.hadoop.hive.metastore.api.Partition partition)
-          throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException,
-          TException {
-    glueMetastoreClientDelegate.addPartitions(Lists.newArrayList(partition), false, true);
-    return partition;
-  }
-
-  @Override
-  public int add_partitions(List<org.apache.hadoop.hive.metastore.api.Partition> partitions)
-          throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException,
-          TException {
-    return glueMetastoreClientDelegate.addPartitions(partitions, false, true).size();
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> add_partitions(
-          List<org.apache.hadoop.hive.metastore.api.Partition> partitions,
-          boolean ifNotExists,
-          boolean needResult
-  ) throws TException {
-    return glueMetastoreClientDelegate.addPartitions(partitions, ifNotExists, needResult);
-  }
-
-  @Override
-  public int add_partitions_pspec(
-          PartitionSpecProxy pSpec
-  ) throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException,
-          MetaException, TException {
-    return glueMetastoreClientDelegate.addPartitionsSpecProxy(pSpec);
-  }
-
-  @Override
-  public void alterFunction(String dbName, String functionName, org.apache.hadoop.hive.metastore.api.Function newFunction) throws InvalidObjectException,
-          MetaException, TException {
-    glueMetastoreClientDelegate.alterFunction(dbName, functionName, newFunction);
-  }
-
-  @Override
-  public void alterFunction(String catalogName, String dbName, String functionName, Function newFunction) throws InvalidObjectException, MetaException, TException {
-    glueMetastoreClientDelegate.alterFunction(dbName, functionName, newFunction);
-  }
-
-  @Override
-  public void alter_partition(
-          String dbName,
-          String tblName,
-          org.apache.hadoop.hive.metastore.api.Partition partition
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, Lists.newArrayList(partition));
-  }
-
-  @Override
-  public void alter_partition(
-          String dbName,
-          String tblName,
-          org.apache.hadoop.hive.metastore.api.Partition partition,
-          EnvironmentContext environmentContext
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, Lists.newArrayList(partition));
-  }
-
-  @Override
-  public void alter_partition(
-          String catalogName,
-          String dbName,
-          String tblName,
-          org.apache.hadoop.hive.metastore.api.Partition partition,
-          EnvironmentContext environmentContext
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, Lists.newArrayList(partition));
-  }
-
-  @Override
-  public void alter_partitions(
-          String dbName,
-          String tblName,
-          List<org.apache.hadoop.hive.metastore.api.Partition> partitions
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, partitions);
-  }
-
-  @Override
-  public void alter_partitions(
-          String dbName,
-          String tblName,
-          List<org.apache.hadoop.hive.metastore.api.Partition> partitions,
-          EnvironmentContext environmentContext
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, partitions);
-  }
-
-  @Override
-  public void alter_partitions(
-          String catalogName,
-          String dbName,
-          String tblName,
-          List<org.apache.hadoop.hive.metastore.api.Partition> partitions,
-          EnvironmentContext environmentContext
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterPartitions(dbName, tblName, partitions);
-  }
-
-  @Override
-  public void alter_table(String dbName, String tblName, Table table)
-          throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterTable(dbName, tblName, table, null);
-  }
-
-  @Override
-  public void alter_table(String catalogName, String dbName, String tblName, Table table, EnvironmentContext environmentContext)
-          throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterTable(dbName, tblName, table, null);
-  }
-
-  @Override
-  public void alter_table(String dbName, String tblName, Table table, boolean cascade)
-          throws InvalidOperationException, MetaException, TException {
-    EnvironmentContext environmentContext = null;
-    if (cascade) {
-      environmentContext = new EnvironmentContext();
-      environmentContext.putToProperties("CASCADE", StatsSetupConst.TRUE);
-    }
-    glueMetastoreClientDelegate.alterTable(dbName, tblName, table, environmentContext);
-  }
-
-  @Override
-  public void alter_table_with_environmentContext(
-          String dbName,
-          String tblName,
-          Table table,
-          EnvironmentContext environmentContext
-  ) throws InvalidOperationException, MetaException, TException {
-    glueMetastoreClientDelegate.alterTable(dbName, tblName, table, environmentContext);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition appendPartition(String dbName, String tblName, List<String> values)
-          throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException, TException {
-    return glueMetastoreClientDelegate.appendPartition(dbName, tblName, values);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition appendPartition(String catalogName, String dbName, String tblName, List<String> values)
-          throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException, TException {
-    return glueMetastoreClientDelegate.appendPartition(dbName, tblName, values);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition appendPartition(String dbName, String tblName, String partitionName) throws InvalidObjectException,
-          org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException, TException {
-    List<String> partVals = partitionNameToVals(partitionName);
-    return glueMetastoreClientDelegate.appendPartition(dbName, tblName, partVals);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition appendPartition(String catalogName, String dbName, String tblName, String partitionName)
-          throws InvalidObjectException, org.apache.hadoop.hive.metastore.api.AlreadyExistsException, MetaException, TException {
-    List<String> partVals = partitionNameToVals(partitionName);
-    return glueMetastoreClientDelegate.appendPartition(dbName, tblName, partVals);
-  }
-
-  @Override
-  public boolean create_role(org.apache.hadoop.hive.metastore.api.Role role) throws MetaException, TException {
-    return glueMetastoreClientDelegate.createRole(role);
-  }
-
-  @Override
-  public boolean drop_role(String roleName) throws MetaException, TException {
-    return glueMetastoreClientDelegate.dropRole(roleName);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Role> list_roles(
-          String principalName, org.apache.hadoop.hive.metastore.api.PrincipalType principalType
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.listRoles(principalName, principalType);
-  }
-
-  @Override
-  public List<String> listRoleNames() throws MetaException, TException {
-    return glueMetastoreClientDelegate.listRoleNames();
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse get_principals_in_role(
-          org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest request) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getPrincipalsInRole(request);
-  }
-
-  @Override
-  public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
-          GetRoleGrantsForPrincipalRequest request) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getRoleGrantsForPrincipal(request);
-  }
-
-  @Override
-  public boolean grant_role(
-          String roleName,
-          String userName,
-          org.apache.hadoop.hive.metastore.api.PrincipalType principalType,
-          String grantor, org.apache.hadoop.hive.metastore.api.PrincipalType grantorType,
-          boolean grantOption
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.grantRole(roleName, userName, principalType, grantor, grantorType, grantOption);
-  }
-
-  @Override
-  public boolean revoke_role(
-          String roleName,
-          String userName,
-          org.apache.hadoop.hive.metastore.api.PrincipalType principalType,
-          boolean grantOption
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.revokeRole(roleName, userName, principalType, grantOption);
-  }
-
-  @Override
-  public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
-    glueMetastoreClientDelegate.cancelDelegationToken(tokenStrForm);
-  }
-
-  @Override
-  public String getTokenStrForm() throws IOException {
-    return glueMetastoreClientDelegate.getTokenStrForm();
-  }
-
-  @Override
-  public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
-    return glueMetastoreClientDelegate.addToken(tokenIdentifier, delegationToken);
-  }
-
-  @Override
-  public boolean removeToken(String tokenIdentifier) throws TException {
-    return glueMetastoreClientDelegate.removeToken(tokenIdentifier);
-  }
-
-  @Override
-  public String getToken(String tokenIdentifier) throws TException {
-    return glueMetastoreClientDelegate.getToken(tokenIdentifier);
-  }
-
-  @Override
-  public List<String> getAllTokenIdentifiers() throws TException {
-    return glueMetastoreClientDelegate.getAllTokenIdentifiers();
-  }
-
-  @Override
-  public int addMasterKey(String key) throws MetaException, TException {
-    return glueMetastoreClientDelegate.addMasterKey(key);
-  }
-
-  @Override
-  public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException, TException {
-    glueMetastoreClientDelegate.updateMasterKey(seqNo, key);
-  }
-
-  @Override
-  public boolean removeMasterKey(Integer keySeq) throws TException {
-    return glueMetastoreClientDelegate.removeMasterKey(keySeq);
-  }
-
-  @Override
-  public String[] getMasterKeys() throws TException {
-    return glueMetastoreClientDelegate.getMasterKeys();
-  }
-
-  @Override
-  public LockResponse checkLock(long lockId)
-          throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException {
-    return glueMetastoreClientDelegate.checkLock(lockId);
-  }
-
-  @Override
-  public void close() {
-    currentMetaVars = null;
-  }
-
-  @Override
-  public void commitTxn(long txnId) throws NoSuchTxnException, TxnAbortedException, TException {
-    glueMetastoreClientDelegate.commitTxn(txnId);
-  }
-
-  @Override
-  public void replCommitTxn(long srcTxnid, String replPolicy) throws NoSuchTxnException, TxnAbortedException, TException {
-    glueMetastoreClientDelegate.replCommitTxn(srcTxnid, replPolicy);
-  }
-
-  @Override
-  public void abortTxns(List<Long> txnIds) throws TException {
-    glueMetastoreClientDelegate.abortTxns(txnIds);
-  }
-
-  @Override
-  public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException {
-    throw new UnsupportedOperationException("allocateTableWriteId is not supported.");
-  }
-
-  @Override
-  public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List<String> partNames) throws TException {
-    throw new UnsupportedOperationException("replTableWriteIdState is not supported.");
-  }
-
-  @Override
-  public List<TxnToWriteId> allocateTableWriteIdsBatch(List<Long> txnIds, String dbName, String tableName) throws TException {
-    throw new UnsupportedOperationException("allocateTableWriteIdsBatch is not supported.");
-  }
-
-  @Override
-  public List<TxnToWriteId> replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy,
-          List<TxnToWriteId> srcTxnToWriteIdList) throws TException {
-    throw new UnsupportedOperationException("replAllocateTableWriteIdsBatch is not supported.");
-  }
-
-  @Deprecated
-  public void compact(
-          String dbName,
-          String tblName,
-          String partitionName,
-          CompactionType compactionType
-  ) throws TException {
-    glueMetastoreClientDelegate.compact(dbName, tblName, partitionName, compactionType);
-  }
-
-  @Deprecated
-  public void compact(
-          String dbName,
-          String tblName,
-          String partitionName,
-          CompactionType compactionType,
-          Map<String, String> tblProperties
-  ) throws TException {
-    glueMetastoreClientDelegate.compact(dbName, tblName, partitionName, compactionType, tblProperties);
-  }
-
-  @Override
-  public CompactionResponse compact2(
-          String dbName,
-          String tblName,
-          String partitionName,
-          CompactionType compactionType,
-          Map<String, String> tblProperties
-  ) throws TException {
-    return glueMetastoreClientDelegate.compact2(dbName, tblName, partitionName, compactionType, tblProperties);
-  }
-
-  @Override
-  public void createFunction(org.apache.hadoop.hive.metastore.api.Function function) throws InvalidObjectException, MetaException, TException {
-    glueMetastoreClientDelegate.createFunction(function);
-  }
-
-  @Override
-  public void createTable(Table tbl) throws org.apache.hadoop.hive.metastore.api.AlreadyExistsException, InvalidObjectException, MetaException,
-          NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.createTable(tbl);
-  }
-
-  @Override
-  public boolean deletePartitionColumnStatistics(
-          String dbName, String tableName, String partName, String colName
-  ) throws NoSuchObjectException, MetaException, InvalidObjectException,
-          TException, org.apache.hadoop.hive.metastore.api.InvalidInputException {
-    return glueMetastoreClientDelegate.deletePartitionColumnStatistics(dbName, tableName, partName, colName);
-  }
-
-  @Override
-  public boolean deletePartitionColumnStatistics(String catalogName, String dbName, String tableName, String partName, String colName)
-          throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
-    return glueMetastoreClientDelegate.deletePartitionColumnStatistics(dbName, tableName, partName, colName);
-  }
-
-  @Override
-  public boolean deleteTableColumnStatistics(
-          String dbName, String tableName, String colName
-  ) throws NoSuchObjectException, MetaException, InvalidObjectException,
-          TException, org.apache.hadoop.hive.metastore.api.InvalidInputException {
-    return glueMetastoreClientDelegate.deleteTableColumnStatistics(dbName, tableName, colName);
-  }
-
-  @Override
-  public boolean deleteTableColumnStatistics(String catalogName, String dbName, String tableName, String colName)
-          throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
-    return glueMetastoreClientDelegate.deleteTableColumnStatistics(dbName, tableName, colName);
-  }
-
-  @Override
-  public void dropFunction(String dbName, String functionName) throws MetaException, NoSuchObjectException,
-          InvalidObjectException, org.apache.hadoop.hive.metastore.api.InvalidInputException, TException {
-    glueMetastoreClientDelegate.dropFunction(dbName, functionName);
-  }
-
-  @Override
-  public void dropFunction(String catalogName, String dbName, String functionName)
-          throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, TException {
-    glueMetastoreClientDelegate.dropFunction(dbName, functionName);
-  }
-
-  private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) throws IOException, MetaException {
-    if (depth > 0 && parent != null && wh.isWritable(parent) && wh.isEmpty(parent)) {
-      wh.deleteDir(parent, true, mustPurge, true);
-      deleteParentRecursive(parent.getParent(), depth - 1, mustPurge);
-    }
-  }
-
-  // This logic is taken from HiveMetaStore#isMustPurge
-  private boolean isMustPurge(Table table, boolean ifPurge) {
-    return (ifPurge || "true".equalsIgnoreCase(table.getParameters().get("auto.purge")));
-  }
-
-  @Override
-  public boolean dropPartition(String dbName, String tblName, List<String> values, boolean deleteData)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, false, deleteData, false);
-  }
-
-  @Override
-  public boolean dropPartition(String catalogName, String dbName, String tblName, List<String> values, boolean deleteData)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, false, deleteData, false);
-  }
-
-  @Override
-  public boolean dropPartition(String dbName, String tblName, List<String> values, PartitionDropOptions options) throws TException {
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, options.ifExists, options.deleteData, options.purgeData);
-  }
-
-  @Override
-  public boolean dropPartition(String catalogName, String dbName, String tblName, List<String> values, PartitionDropOptions options)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, options.ifExists, options.deleteData, options.purgeData);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions(
-          String dbName,
-          String tblName,
-          List<ObjectPair<Integer, byte[]>> partExprs,
-          boolean deleteData,
-          boolean ifExists
-  ) throws NoSuchObjectException, MetaException, TException {
-    // use the PartitionDropOptions default (purgeData = false)
-    return dropPartitions_core(dbName, tblName, partExprs, deleteData, false);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions(
-          String dbName,
-          String tblName,
-          List<ObjectPair<Integer, byte[]>> partExprs,
-          boolean deleteData,
-          boolean ifExists,
-          boolean needResults
-  ) throws NoSuchObjectException, MetaException, TException {
-    return dropPartitions_core(dbName, tblName, partExprs, deleteData, false);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions(
-          String dbName,
-          String tblName,
-          List<ObjectPair<Integer, byte[]>> partExprs,
-          PartitionDropOptions options
-  ) throws NoSuchObjectException, MetaException, TException {
-    return dropPartitions_core(dbName, tblName, partExprs, options.deleteData, options.purgeData);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions(
-          String catalogName,
-          String dbName,
-          String tblName,
-          List<ObjectPair<Integer, byte[]>> partExprs,
-          PartitionDropOptions options
-  ) throws NoSuchObjectException, MetaException, TException {
-    return dropPartitions_core(dbName, tblName, partExprs, options.deleteData, options.purgeData);
-  }
-
-  @Override
-  public boolean dropPartition(String dbName, String tblName, String partitionName, boolean deleteData)
-          throws NoSuchObjectException, MetaException, TException {
-    List<String> values = partitionNameToVals(partitionName);
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, false, deleteData, false);
-  }
-
-  @Override
-  public boolean dropPartition(String catalogName, String dbName, String tblName, String partitionName, boolean deleteData)
-          throws NoSuchObjectException, MetaException, TException {
-    List<String> values = partitionNameToVals(partitionName);
-    return glueMetastoreClientDelegate.dropPartition(dbName, tblName, values, false, deleteData, false);
-  }
-
-  private List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions_core(
-          String databaseName,
-          String tableName,
-          List<ObjectPair<Integer, byte[]>> partExprs,
-          boolean deleteData,
-          boolean purgeData
-  ) throws TException {
-    List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
-    for (ObjectPair<Integer, byte[]> expr : partExprs) {
-      byte[] tmp = expr.getSecond();
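-      // Each Hive-side partition expression arrives as a serialized byte[]; convert it to
-      // the catalog's string filter syntax before querying for matching partitions.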
-      String exprString = ExpressionHelper.convertHiveExpressionToCatalogExpression(tmp);
-      List<Partition> catalogPartitionsToDelete = glueMetastoreClientDelegate.getCatalogPartitions(databaseName, tableName, exprString, -1);
-      deleted.addAll(batchDeletePartitions(databaseName, tableName, catalogPartitionsToDelete, deleteData, purgeData));
-    }
-    return deleted;
-  }
-
-  /**
-   * Delete all partitions in the given list via BatchDeletePartitions requests. No transaction is used,
-   * so the call may result in partial failure.
-   * @param dbName name of the database containing the partitions
-   * @param tableName name of the table containing the partitions
-   * @param partitionsToDelete catalog partitions to delete
-   * @param deleteData whether to also delete the partition data
-   * @param purgeData whether to bypass the trash when deleting data
-   * @return the partitions successfully deleted
-   * @throws TException if any partition fails to be deleted
-   */
-  private List<org.apache.hadoop.hive.metastore.api.Partition> batchDeletePartitions(
-          final String dbName, final String tableName, final List<Partition> partitionsToDelete,
-          final boolean deleteData, final boolean purgeData) throws TException {
-
-    List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
-    if (partitionsToDelete == null) {
-      return deleted;
-    }
-
-    validateBatchDeletePartitionsArguments(dbName, tableName, partitionsToDelete);
-
-    List<Future<BatchDeletePartitionsHelper>> batchDeletePartitionsFutures = Lists.newArrayList();
-
-    int numOfPartitionsToDelete = partitionsToDelete.size();
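-    // Page through the partitions in chunks of BATCH_DELETE_PARTITIONS_PAGE_SIZE,
-    // submitting one BatchDeletePartitionsHelper task per page to the shared thread pool.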
-    for (int i = 0; i < numOfPartitionsToDelete; i += BATCH_DELETE_PARTITIONS_PAGE_SIZE) {
-      int j = Math.min(i + BATCH_DELETE_PARTITIONS_PAGE_SIZE, numOfPartitionsToDelete);
-      final List<Partition> partitionsOnePage = partitionsToDelete.subList(i, j);
-
-      batchDeletePartitionsFutures.add(BATCH_DELETE_PARTITIONS_THREAD_POOL.submit(new Callable<BatchDeletePartitionsHelper>() {
-        @Override
-        public BatchDeletePartitionsHelper call() throws Exception {
-          return new BatchDeletePartitionsHelper(glueClient, dbName, tableName, catalogId, partitionsOnePage).deletePartitions();
-        }
-      }));
-    }
-
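-    // Collect results from every page; post-process each deleted partition and
-    // remember only the first TException so a single representative failure is thrown.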
-    TException tException = null;
-    for (Future<BatchDeletePartitionsHelper> future : batchDeletePartitionsFutures) {
-      try {
-        BatchDeletePartitionsHelper batchDeletePartitionsHelper = future.get();
-        for (Partition partition : batchDeletePartitionsHelper.getPartitionsDeleted()) {
-          org.apache.hadoop.hive.metastore.api.Partition hivePartition =
-                  catalogToHiveConverter.convertPartition(partition);
-          try {
-            performDropPartitionPostProcessing(dbName, tableName, hivePartition, deleteData, purgeData);
-          } catch (TException e) {
-            logger.error("Drop partition directory failed.", e);
-            tException = tException == null ? e : tException;
-          }
-          deleted.add(hivePartition);
-        }
-        tException = tException == null ? batchDeletePartitionsHelper.getFirstTException() : tException;
-      } catch (Exception e) {
-        logger.error("Exception thrown by BatchDeletePartitions thread pool. ", e);
-      }
-    }
-
-    if (tException != null) {
-      throw tException;
-    }
-    return deleted;
-  }
-
-  private void validateBatchDeletePartitionsArguments(final String dbName, final String tableName,
-          final List<Partition> partitionsToDelete) {
-
-    Preconditions.checkArgument(dbName != null, "Database name cannot be null");
-    Preconditions.checkArgument(tableName != null, "Table name cannot be null");
-    for (Partition partition : partitionsToDelete) {
-      Preconditions.checkArgument(dbName.equals(partition.getDatabaseName()), "Partition database name must match");
-      Preconditions.checkArgument(tableName.equals(partition.getTableName()), "Partition table name must match");
-      Preconditions.checkArgument(partition.getValues() != null, "Partition values cannot be null");
-    }
-  }
-
-  // Preserve the logic from Hive metastore
-  private void performDropPartitionPostProcessing(String dbName, String tblName,
-          org.apache.hadoop.hive.metastore.api.Partition partition, boolean deleteData, boolean ifPurge)
-          throws MetaException, NoSuchObjectException, TException {
-    if (deleteData && partition.getSd() != null && partition.getSd().getLocation() != null) {
-      Path partPath = new Path(partition.getSd().getLocation());
-      Table table = getTable(dbName, tblName);
-      if (isExternalTable(table)) {
-        // Don't delete external table data
-        return;
-      }
-      boolean mustPurge = isMustPurge(table, ifPurge);
-      wh.deleteDir(partPath, true, mustPurge, true);
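-      // Prune now-empty parent directories, climbing at most one level per remaining
-      // partition key so the table directory itself is never removed.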
-      try {
-        List<String> values = partition.getValues();
-        deleteParentRecursive(partPath.getParent(), values.size() - 1, mustPurge);
-      } catch (IOException e) {
-        throw new MetaException(e.getMessage());
-      }
-    }
-  }
-
-  @Deprecated
-  public void dropTable(String tableName, boolean deleteData) throws MetaException, UnknownTableException, TException,
-          NoSuchObjectException {
-    dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false);
-  }
-
-  @Override
-  public void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException {
-    dropTable(dbname, tableName, true, true, false);
-  }
-
-  @Override
-  public void dropTable(
-          String catName,
-          String dbName,
-          String tableName,
-          boolean deleteData,
-          boolean ignoreUnknownTable,
-          boolean ifPurge
-  ) throws MetaException, NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.dropTable(dbName, tableName, deleteData, ignoreUnknownTable, ifPurge);
-  }
-
-  @Override
-  public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
-    throw new UnsupportedOperationException("truncateTable is not supported");
-  }
-
-  @Override
-  public void truncateTable(String catalogName, String dbName, String tableName, List<String> partNames) throws MetaException, TException {
-    throw new UnsupportedOperationException("truncateTable is not supported");
-  }
-
-  @Override
-  public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest cmRecycleRequest) throws MetaException, TException {
-    // Taken from HiveMetaStore#cm_recycle
-    wh.recycleDirToCmPath(new Path(cmRecycleRequest.getDataPath()), cmRecycleRequest.isPurge());
-    return new CmRecycleResponse();
-  }
-
-  @Override
-  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab)
-          throws MetaException, TException, NoSuchObjectException {
-    dropTable(dbname, tableName, deleteData, ignoreUnknownTab, false);
-  }
-
-  @Override
-  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge)
-          throws MetaException, TException, NoSuchObjectException {
-    glueMetastoreClientDelegate.dropTable(dbname, tableName, deleteData, ignoreUnknownTab, ifPurge);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition exchange_partition(
-          Map<String, String> partitionSpecs,
-          String srcDb,
-          String srcTbl,
-          String dstDb,
-          String dstTbl
-  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-    return glueMetastoreClientDelegate.exchangePartition(partitionSpecs, srcDb, srcTbl, dstDb, dstTbl);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition exchange_partition(
-          Map<String, String> partitionSpecs,
-          String sourceCat,
-          String sourceDb,
-          String sourceTable,
-          String destCat,
-          String destdb,
-          String destTableName
-  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-    return glueMetastoreClientDelegate.exchangePartition(partitionSpecs, sourceDb, sourceTable, destdb, destTableName);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> exchange_partitions(
-          Map<String, String> partitionSpecs,
-          String sourceDb,
-          String sourceTbl,
-          String destDb,
-          String destTbl
-  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-    return glueMetastoreClientDelegate.exchangePartitions(partitionSpecs, sourceDb, sourceTbl, destDb, destTbl);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> exchange_partitions(
-          Map<String, String> partitionSpecs,
-          String sourceCat,
-          String sourceDb,
-          String sourceTbl,
-          String destCat,
-          String destDb,
-          String destTbl
-  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
-    return glueMetastoreClientDelegate.exchangePartitions(partitionSpecs, sourceDb, sourceTbl, destDb, destTbl);
-  }
-
-  @Override
-  public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames, List<String> partName)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getAggrColStatsFor(dbName, tblName, colNames, partName);
-  }
-
-  @Override
-  public AggrStats getAggrColStatsFor(String catalogName, String dbName, String tblName, List<String> colNames, List<String> partName)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getAggrColStatsFor(dbName, tblName, colNames, partName);
-  }
-
-  @Override
-  public List<String> getAllTables(String dbname) throws MetaException, TException, UnknownDBException {
-    return getTables(dbname, ".*");
-  }
-
-  @Override
-  public List<String> getAllTables(String catalogName, String dbname) throws MetaException, TException, UnknownDBException {
-    return getTables(dbname, ".*");
-  }
-
-  @Override
-  public String getConfigValue(String name, String defaultValue) throws TException, ConfigValSecurityException {
-    if (name == null) {
-      return defaultValue;
-    }
-
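-    // Only keys prefixed with hive, hdfs, mapred, or metastore may be read; anything else is rejected.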
-    if (!Pattern.matches("(hive|hdfs|mapred|metastore).*", name)) {
-      throw new ConfigValSecurityException("For security reasons, the config key " + name + " cannot be accessed");
-    }
-
-    return conf.get(name, defaultValue);
-  }
-
-  @Override
-  public String getDelegationToken(
-          String owner, String renewerKerberosPrincipalName
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getDelegationToken(owner, renewerKerberosPrincipalName);
-  }
-
-  @Override
-  public List<FieldSchema> getFields(String db, String tableName) throws MetaException, TException,
-          UnknownTableException, UnknownDBException {
-    return glueMetastoreClientDelegate.getFields(db, tableName);
-  }
-
-  @Override
-  public List<FieldSchema> getFields(String catalogName, String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException {
-    return glueMetastoreClientDelegate.getFields(db, tableName);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Function getFunction(String dbName, String functionName) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getFunction(dbName, functionName);
-  }
-
-  @Override
-  public Function getFunction(String catalogName, String dbName, String functionName) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getFunction(dbName, functionName);
-  }
-
-  @Override
-  public List<String> getFunctions(String dbName, String pattern) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getFunctions(dbName, pattern);
-  }
-
-  @Override
-  public List<String> getFunctions(String catalogName, String dbName, String pattern) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getFunctions(dbName, pattern);
-  }
-
-  @Override
-  public GetAllFunctionsResponse getAllFunctions() throws MetaException, TException {
-    return glueMetastoreClientDelegate.getAllFunctions();
-  }
-
-  @Override
-  public String getMetaConf(String key) throws MetaException, TException {
-    MetastoreConf.ConfVars metaConfVar = MetastoreConf.getMetaConf(key);
-    if (metaConfVar == null) {
-      throw new MetaException("Invalid configuration key " + key);
-    }
-    return conf.get(key, metaConfVar.getDefaultVal().toString());
-  }
-
-  @Override
-  public void createCatalog(Catalog catalog) throws org.apache.hadoop.hive.metastore.api.AlreadyExistsException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("createCatalog is not supported");
-  }
-
-  @Override
-  public void alterCatalog(String s, Catalog catalog) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("alterCatalog is not supported");
-  }
-
-  @Override
-  public Catalog getCatalog(String s) throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("getCatalog is not supported");
-  }
-
-  @Override
-  public List<String> getCatalogs() throws MetaException, TException {
-    throw new UnsupportedOperationException("getCatalogs is not supported");
-  }
-
-  @Override
-  public void dropCatalog(String s) throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
-    throw new UnsupportedOperationException("dropCatalog is not supported");
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartition(String dbName, String tblName, List<String> values)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartition(dbName, tblName, values);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartition(String catalogName, String dbName, String tblName, List<String> values) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartition(dbName, tblName, values);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartition(String dbName, String tblName, String partitionName)
-          throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.getPartition(dbName, tblName, partitionName);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartition(String catalogName, String dbName, String tblName, String partitionName) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.getPartition(dbName, tblName, partitionName);
-  }
-
-  @Override
-  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
-          String dbName,
-          String tableName,
-          List<String> partitionNames,
-          List<String> columnNames
-  ) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitionColumnStatistics(dbName, tableName, partitionNames, columnNames);
-  }
-
-  @Override
-  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
-          String catalogName,
-          String dbName,
-          String tableName,
-          List<String> partitionNames,
-          List<String> columnNames) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitionColumnStatistics(dbName, tableName, partitionNames, columnNames);
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartitionWithAuthInfo(
-          String databaseName, String tableName, List<String> values,
-          String userName, List<String> groupNames)
-          throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-
-    // TODO move this into the service
-    org.apache.hadoop.hive.metastore.api.Partition partition = getPartition(databaseName, tableName, values);
-    Table table = getTable(databaseName, tableName);
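-    // Privilege info is only attached when the table opts in via the PARTITION_LEVEL_PRIVILEGE parameter.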
-    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
-      String partName = Warehouse.makePartName(table.getPartitionKeys(), values);
-      HiveObjectRef obj = new HiveObjectRef();
-      obj.setObjectType(HiveObjectType.PARTITION);
-      obj.setDbName(databaseName);
-      obj.setObjectName(tableName);
-      obj.setPartValues(values);
-      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet privilegeSet =
-              this.get_privilege_set(obj, userName, groupNames);
-      partition.setPrivileges(privilegeSet);
-    }
-
-    return partition;
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.Partition getPartitionWithAuthInfo(
-          String catalogName,
-          String databaseName,
-          String tableName,
-          List<String> values,
-          String userName,
-          List<String> groupNames) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
-    return getPartitionWithAuthInfo(databaseName, tableName, values, userName, groupNames);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> getPartitionsByNames(
-          String databaseName, String tableName, List<String> partitionNames)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitionsByNames(databaseName, tableName, partitionNames);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> getPartitionsByNames(
-          String catalogName,
-          String databaseName,
-          String tableName,
-          List<String> partitionNames
-  ) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitionsByNames(databaseName, tableName, partitionNames);
-  }
-
-  @Override
-  public List<FieldSchema> getSchema(String db, String tableName) throws MetaException, TException, UnknownTableException,
-          UnknownDBException {
-    return glueMetastoreClientDelegate.getSchema(db, tableName);
-  }
-
-  @Override
-  public List<FieldSchema> getSchema(String catalogName, String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException {
-    return glueMetastoreClientDelegate.getSchema(db, tableName);
-  }
-
-  @Override
-  public Table getTable(String dbName, String tableName)
-          throws MetaException, TException, NoSuchObjectException {
-    return glueMetastoreClientDelegate.getTable(dbName, tableName);
-  }
-
-  @Override
-  public Table getTable(String catalogName, String dbName, String tableName) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getTable(dbName, tableName);
-  }
-
-  @Override
-  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, List<String> colNames)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getTableColumnStatistics(dbName, tableName, colNames);
-  }
-
-  @Override
-  public List<ColumnStatisticsObj> getTableColumnStatistics(String catalogName, String dbName, String tableName, List<String> colNames) throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getTableColumnStatistics(dbName, tableName, colNames);
-  }
-
-  @Override
-  public List<Table> getTableObjectsByName(String dbName, List<String> tableNames) throws MetaException,
-          InvalidOperationException, UnknownDBException, TException {
-    List<Table> hiveTables = Lists.newArrayList();
-    for (String tableName : tableNames) {
-      hiveTables.add(getTable(dbName, tableName));
-    }
-
-    return hiveTables;
-  }
-
-  @Override
-  public List<Table> getTableObjectsByName(String catalogName, String dbName, List<String> tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException {
-    return getTableObjectsByName(dbName, tableNames);
-  }
-
-  @Override
-  public Materialization getMaterializationInvalidationInfo(CreationMetadata creationMetadata, String validTxnList) throws MetaException, InvalidOperationException, UnknownDBException, TException {
-    throw new UnsupportedOperationException("getMaterializationInvalidationInfo is not supported");
-  }
-
-  @Override
-  public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws MetaException, TException {
-    throw new UnsupportedOperationException("getMaterializationInvalidationInfo is not supported");
-  }
-
-  @Override
-  public void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) throws MetaException, TException {
-    throw new UnsupportedOperationException("getMaterializationInvalidationInfo is not supported");
-  }
-
-  @Override
-  public List<String> getTables(String dbname, String tablePattern) throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTables(dbname, tablePattern);
-  }
-
-  @Override
-  public List<String> getTables(String catalogName, String dbname, String tablePattern) throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTables(dbname, tablePattern);
-  }
-
-  @Override
-  public List<String> getTables(String dbname, String tablePattern, TableType tableType)
-          throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTables(dbname, tablePattern, tableType);
-  }
-
-  @Override
-  public List<String> getTables(String catalogName, String dbname, String tablePattern, TableType tableType) throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTables(dbname, tablePattern, tableType);
-  }
-
-  @Override
-  public List<String> getMaterializedViewsForRewriting(String dbName) throws MetaException, TException, UnknownDBException {
-    // not supported
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<String> getMaterializedViewsForRewriting(String catalogName, String dbName) throws MetaException, TException, UnknownDBException {
-    // not supported
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
-          throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTableMeta(dbPatterns, tablePatterns, tableTypes);
-  }
-
-  @Override
-  public List<TableMeta> getTableMeta(String catalogName, String dbPatterns, String tablePatterns, List<String> tableTypes) throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.getTableMeta(dbPatterns, tablePatterns, tableTypes);
-  }
-
-  @Override
-  public ValidTxnList getValidTxns() throws TException {
-    return glueMetastoreClientDelegate.getValidTxns();
-  }
-
-  @Override
-  public ValidTxnList getValidTxns(long currentTxn) throws TException {
-    return glueMetastoreClientDelegate.getValidTxns(currentTxn);
-  }
-
-  @Override
-  public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException {
-    throw new UnsupportedOperationException("getValidWriteIds is not supported");
-  }
-
-  @Override
-  public List<TableValidWriteIds> getValidWriteIds(List<String> tablesList, String validTxnList) throws TException {
-    throw new UnsupportedOperationException("getValidWriteIds is not supported");
-  }
-
-  @Override
-  public org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet get_privilege_set(
-          HiveObjectRef obj,
-          String user, List<String> groups
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.getPrivilegeSet(obj, user, groups);
-  }
-
-  @Override
-  public boolean grant_privileges(org.apache.hadoop.hive.metastore.api.PrivilegeBag privileges)
-          throws MetaException, TException {
-    return glueMetastoreClientDelegate.grantPrivileges(privileges);
-  }
-
-  @Override
-  public boolean revoke_privileges(
-          org.apache.hadoop.hive.metastore.api.PrivilegeBag privileges,
-          boolean grantOption
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.revokePrivileges(privileges, grantOption);
-  }
-
-  @Override
-  public boolean refresh_privileges(HiveObjectRef hiveObjectRef, String s, PrivilegeBag privilegeBag) throws MetaException, TException {
-    throw new UnsupportedOperationException("refresh_privileges is not supported");
-  }
-
-  @Override
-  public void heartbeat(long txnId, long lockId)
-          throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException {
-    glueMetastoreClientDelegate.heartbeat(txnId, lockId);
-  }
-
-  @Override
-  public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException {
-    return glueMetastoreClientDelegate.heartbeatTxnRange(min, max);
-  }
-
-  @Override
-  public boolean isCompatibleWith(Configuration conf) {
-    if (currentMetaVars == null) {
-      return false; // recreate
-    }
-    boolean compatible = true;
-    for (MetastoreConf.ConfVars oneVar : MetastoreConf.metaVars) {
-      // Since metaVars are all of different types, use string for comparison
-      String oldVar = currentMetaVars.get(oneVar.getVarname());
-      String newVar = conf.get(oneVar.getVarname(), "");
-      if (oldVar == null ||
-              (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
-        logger.info("Mestastore configuration " + oneVar.getVarname() +
-                " changed from " + oldVar + " to " + newVar);
-        compatible = false;
-      }
-    }
-    return compatible;
-  }
-
-  @Override
-  public void setHiveAddedJars(String addedJars) {
-    // taken from HiveMetaStoreClient
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.ADDED_JARS, addedJars);
-  }
-
-  @Override
-  public boolean isLocalMetaStore() {
-    return false;
-  }
-
-  private void snapshotActiveConf() {
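-    // Snapshot the current value of every tracked metastore conf var; isCompatibleWith()
-    // later compares against this map to decide whether the client must be recreated.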
-    currentMetaVars = new HashMap<String, String>(MetastoreConf.metaVars.length);
-    for (MetastoreConf.ConfVars oneVar : MetastoreConf.metaVars) {
-      currentMetaVars.put(oneVar.getVarname(), conf.get(oneVar.getVarname(), ""));
-    }
-  }
-
-  @Override
-  public boolean isPartitionMarkedForEvent(String dbName, String tblName, Map<String, String> partKVs, PartitionEventType eventType)
-          throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
-          UnknownPartitionException, InvalidPartitionException {
-    return glueMetastoreClientDelegate.isPartitionMarkedForEvent(dbName, tblName, partKVs, eventType);
-  }
-
-  @Override
-  public boolean isPartitionMarkedForEvent(String catalogName, String dbName, String tblName, Map<String, String> partKVs, PartitionEventType eventType)
-          throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException {
-    return glueMetastoreClientDelegate.isPartitionMarkedForEvent(dbName, tblName, partKVs, eventType);
-  }
-
-  @Override
-  public List<String> listPartitionNames(String dbName, String tblName, short max)
-          throws MetaException, TException {
-    try {
-      return glueMetastoreClientDelegate.listPartitionNames(dbName, tblName, null, max);
-    } catch (NoSuchObjectException e) {
-      // For compatibility with Hive 1.0.0
-      return Collections.emptyList();
-    }
-  }
-
-  @Override
-  public List<String> listPartitionNames(String catalogName, String dbName, String tblName, int maxParts)
-          throws NoSuchObjectException, MetaException, TException {
-    return listPartitionNames(dbName, tblName, (short) maxParts);
-  }
-
-  @Override
-  public List<String> listPartitionNames(String databaseName, String tableName,
-          List<String> values, short max)
-          throws MetaException, TException, NoSuchObjectException {
-    return glueMetastoreClientDelegate.listPartitionNames(databaseName, tableName, values, max);
-  }
-
-  @Override
-  public List<String> listPartitionNames(String catalogName, String databaseName, String tableName, List<String> values, int max)
-          throws MetaException, TException, NoSuchObjectException {
-    return listPartitionNames(databaseName, tableName, values, (short) max);
-  }
-
-  @Override
-  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest partitionValuesRequest) throws TException {
-    return glueMetastoreClientDelegate.listPartitionValues(partitionValuesRequest);
-  }
-
-  @Override
-  public int getNumPartitionsByFilter(String dbName, String tableName, String filter)
-          throws MetaException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.getNumPartitionsByFilter(dbName, tableName, filter);
-  }
-
-  @Override
-  public int getNumPartitionsByFilter(String catalogName, String dbName, String tableName, String filter)
-          throws MetaException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.getNumPartitionsByFilter(dbName, tableName, filter);
-  }
-
-  @Override
-  public PartitionSpecProxy listPartitionSpecs(String dbName, String tblName, int max) throws TException {
-    return glueMetastoreClientDelegate.listPartitionSpecs(dbName, tblName, max);
-  }
-
-  @Override
-  public PartitionSpecProxy listPartitionSpecs(String catalogName, String dbName, String tblName, int max) throws TException {
-    return glueMetastoreClientDelegate.listPartitionSpecs(dbName, tblName, max);
-  }
-
-  @Override
-  public PartitionSpecProxy listPartitionSpecsByFilter(String dbName, String tblName, String filter, int max)
-          throws MetaException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.listPartitionSpecsByFilter(dbName, tblName, filter, max);
-  }
-
-  @Override
-  public PartitionSpecProxy listPartitionSpecsByFilter(String catalogName, String dbName, String tblName, String filter, int max)
-          throws MetaException, NoSuchObjectException, TException {
-    return glueMetastoreClientDelegate.listPartitionSpecsByFilter(dbName, tblName, filter, max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(String dbName, String tblName, short max)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitions(dbName, tblName, null, max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(String catalogName, String dbName, String tblName, int max)
-          throws NoSuchObjectException, MetaException, TException {
-    return glueMetastoreClientDelegate.getPartitions(dbName, tblName, null, max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(
-          String databaseName,
-          String tableName,
-          List<String> values,
-          short max
-  ) throws NoSuchObjectException, MetaException, TException {
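-    // A partial value list is matched positionally against the table's partition keys and rewritten
-    // as a catalog filter expression, e.g. values ["2023"] with keys (year, month) become year='2023'.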
-    String expression = null;
-    if (values != null) {
-      Table table = getTable(databaseName, tableName);
-      expression = ExpressionHelper.buildExpressionFromPartialSpecification(table, values);
-    }
-    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, expression, (long) max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(
-          String catalogName,
-          String databaseName,
-          String tableName,
-          List<String> values,
-          int max) throws NoSuchObjectException, MetaException, TException {
-    return listPartitions(databaseName, tableName, values, (short) max);
-  }
-
-  @Override
-  public boolean listPartitionsByExpr(
-          String databaseName,
-          String tableName,
-          byte[] expr,
-          String defaultPartitionName,
-          short max,
-          List<org.apache.hadoop.hive.metastore.api.Partition> result
-  ) throws TException {
-    checkNotNull(result, "The result argument cannot be null.");
-
-    String catalogExpression =  ExpressionHelper.convertHiveExpressionToCatalogExpression(expr);
-    List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
-            glueMetastoreClientDelegate.getPartitions(databaseName, tableName, catalogExpression, (long) max);
-    result.addAll(partitions);
-
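-    // Returning false tells the caller that no "unknown" partitions are included:
-    // the expression was fully evaluated by the catalog service.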
-    return false;
-  }
-
-  @Override
-  public boolean listPartitionsByExpr(
-          String catalogName,
-          String databaseName,
-          String tableName,
-          byte[] expr,
-          String defaultPartitionName,
-          int max,
-          List<org.apache.hadoop.hive.metastore.api.Partition> result) throws TException {
-    return listPartitionsByExpr(databaseName, tableName, expr, defaultPartitionName, (short) max, result);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsByFilter(
-          String databaseName,
-          String tableName,
-          String filter,
-          short max
-  ) throws MetaException, NoSuchObjectException, TException {
-    // Replace double quotes with single quotes in the filter expression,
-    // since the server side does not accept double-quoted expressions.
-    if (StringUtils.isNotBlank(filter)) {
-      filter = ExpressionHelper.replaceDoubleQuoteWithSingleQuotes(filter);
-    }
-    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, filter, (long) max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsByFilter(
-          String catalogName,
-          String databaseName,
-          String tableName,
-          String filter,
-          int max) throws MetaException, NoSuchObjectException, TException {
-    return listPartitionsByFilter(databaseName, tableName, filter, (short) max);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table, short maxParts,
-          String user, List<String> groups)
-          throws MetaException, TException, NoSuchObjectException {
-    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, maxParts);
-
-    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
-      HiveObjectRef obj = new HiveObjectRef();
-      obj.setObjectType(HiveObjectType.PARTITION);
-      obj.setDbName(database);
-      obj.setObjectName(table);
-      obj.setPartValues(p.getValues());
-      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set = this.get_privilege_set(obj, user, groups);
-      p.setPrivileges(set);
-    }
-
-    return partitions;
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(
-          String catalogName,
-          String database,
-          String table,
-          int maxParts,
-          String user,
-          List<String> groups
-  ) throws MetaException, TException, NoSuchObjectException {
-    return listPartitionsWithAuthInfo(database, table, (short) maxParts, user, groups);
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table,
-          List<String> partVals, short maxParts,
-          String user, List<String> groups) throws MetaException, TException, NoSuchObjectException {
-    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, partVals, maxParts);
-
-    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
-      HiveObjectRef obj = new HiveObjectRef();
-      obj.setObjectType(HiveObjectType.PARTITION);
-      obj.setDbName(database);
-      obj.setObjectName(table);
-      obj.setPartValues(p.getValues());
-      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set;
-      try {
-        set = get_privilege_set(obj, user, groups);
-      } catch (MetaException e) {
-        logger.info(String.format("No privileges found for user: %s, "
-                + "groups: [%s]", user, LoggingHelper.concatCollectionToStringForLogging(groups, ",")));
-        set = new org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet();
-      }
-      p.setPrivileges(set);
-    }
-
-    return partitions;
-  }
-
-  @Override
-  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(
-          String catalogName,
-          String database,
-          String table,
-          List<String> partVals,
-          int maxParts,
-          String user,
-          List<String> groups) throws MetaException, TException, NoSuchObjectException {
-    return listPartitionsWithAuthInfo(database, table, partVals, (short) maxParts, user, groups);
-  }
-
-  @Override
-  public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables) throws MetaException,
-          TException, InvalidOperationException, UnknownDBException {
-    return glueMetastoreClientDelegate.listTableNamesByFilter(dbName, filter, maxTables);
-  }
-
-  @Override
-  public List<String> listTableNamesByFilter(String catalogName, String dbName, String filter, int maxTables) throws TException, InvalidOperationException, UnknownDBException {
-    return glueMetastoreClientDelegate.listTableNamesByFilter(dbName, filter, (short) maxTables);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> list_privileges(
-          String principal,
-          org.apache.hadoop.hive.metastore.api.PrincipalType principalType,
-          HiveObjectRef objectRef
-  ) throws MetaException, TException {
-    return glueMetastoreClientDelegate.listPrivileges(principal, principalType, objectRef);
-  }
-
-  @Override
-  public LockResponse lock(LockRequest lockRequest) throws NoSuchTxnException, TxnAbortedException, TException {
-    return glueMetastoreClientDelegate.lock(lockRequest);
-  }
-
-  @Override
-  public void markPartitionForEvent(
-          String dbName,
-          String tblName,
-          Map<String, String> partKVs,
-          PartitionEventType eventType
-  ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
-          UnknownPartitionException, InvalidPartitionException {
-    glueMetastoreClientDelegate.markPartitionForEvent(dbName, tblName, partKVs, eventType);
-  }
-
-  @Override
-  public void markPartitionForEvent(
-          String catalogName,
-          String dbName,
-          String tblName,
-          Map<String, String> partKVs,
-          PartitionEventType eventType
-  ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException {
-    glueMetastoreClientDelegate.markPartitionForEvent(dbName, tblName, partKVs, eventType);
-  }
-
-  @Override
-  public long openTxn(String user) throws TException {
-    return glueMetastoreClientDelegate.openTxn(user);
-  }
-
-  @Override
-  public List<Long> replOpenTxn(String replPolicy, List<Long> srcTxnIds, String user) throws TException {
-    throw new UnsupportedOperationException("replOpenTxn is not supported");
-  }
-
-  @Override
-  public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
-    return glueMetastoreClientDelegate.openTxns(user, numTxns);
-  }
-
-  @Override
-  public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException {
-    // Lifted from HiveMetaStore
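-    // e.g. "ds=2023-04-01/hr=12" -> {ds=2023-04-01, hr=12}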
-    if (name.length() == 0) {
-      return new HashMap<String, String>();
-    }
-    return Warehouse.makeSpecFromName(name);
-  }
-
-  @Override
-  public List<String> partitionNameToVals(String name) throws MetaException, TException {
-    return glueMetastoreClientDelegate.partitionNameToVals(name);
-  }
-
-  @Override
-  public void reconnect() throws MetaException {
-    // TODO reset active Hive confs for metastore glueClient
-    logger.debug("reconnect() was called.");
-  }
-
-  @Override
-  public void renamePartition(String dbName, String tblName, List<String> partitionValues,
-          org.apache.hadoop.hive.metastore.api.Partition newPartition)
-          throws InvalidOperationException, MetaException, TException {
-    throw new TException("Not implement yet");
-    // Commend out to avoid using shim
-    //// Set DDL time to now if not specified
-    //setDDLTime(newPartition);
-    //Table tbl;
-    //org.apache.hadoop.hive.metastore.api.Partition oldPart;
-    //
-    //try {
-    //  tbl = getTable(dbName, tblName);
-    //  oldPart = getPartition(dbName, tblName, partitionValues);
-    //} catch(NoSuchObjectException e) {
-    //  throw new InvalidOperationException(e.getMessage());
-    //}
-    //
-    //if(newPartition.getSd() == null || oldPart.getSd() == null ) {
-    //  throw new InvalidOperationException("Storage descriptor cannot be null");
-    //}
-    //
-    //// if an external partition is renamed, the location should not change
-    //if (!Strings.isNullOrEmpty(tbl.getTableType()) && tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
-    //  newPartition.getSd().setLocation(oldPart.getSd().getLocation());
-    //  renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
-    //} else {
-    //
-    //  Path destPath = getDestinationPathForRename(dbName, tbl, newPartition);
-    //  Path srcPath = new Path(oldPart.getSd().getLocation());
-    //  FileSystem srcFs = wh.getFs(srcPath);
-    //  FileSystem destFs = wh.getFs(destPath);
-    //
-    //  verifyDestinationLocation(srcFs, destFs, srcPath, destPath, tbl, newPartition);
-    //  newPartition.getSd().setLocation(destPath.toString());
-    //
-    //  renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
-    //  boolean success = true;
-    //  try{
-    //    if (srcFs.exists(srcPath)) {
-    //      //if destPath's parent path doesn't exist, we should mkdir it
-    //      Path destParentPath = destPath.getParent();
-    //      if (!hiveShims.mkdirs(wh, destParentPath)) {
-    //        throw new IOException("Unable to create path " + destParentPath);
-    //      }
-    //      wh.renameDir(srcPath, destPath, true);
-    //    }
-    //  } catch (IOException e) {
-    //    success = false;
-    //    throw new InvalidOperationException("Unable to access old location "
-    //          + srcPath + " for partition " + tbl.getDbName() + "."
-    //          + tbl.getTableName() + " " + partitionValues);
-    //  } finally {
-    //    if(!success) {
-    //      // revert metastore operation
-    //      renamePartitionInCatalog(dbName, tblName, newPartition.getValues(), oldPart);
-    //    }
-    //  }
-    //}
-  }
-
-  @Override
-  public void renamePartition(
-          String catalogName,
-          String dbName,
-          String tblName,
-          List<String> partitionValues,
-          org.apache.hadoop.hive.metastore.api.Partition newPartition
-  ) throws InvalidOperationException, MetaException, TException {
-    renamePartition(dbName, tblName, partitionValues, newPartition);
-  }
-
-  private void verifyDestinationLocation(FileSystem srcFs, FileSystem destFs, Path srcPath, Path destPath, Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
-          throws InvalidOperationException {
-    String oldPartLoc = srcPath.toString();
-    String newPartLoc = destPath.toString();
-
-    // check that src and dest are on the same file system
-    if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
-      throw new InvalidOperationException("table new location " + destPath
-              + " is on a different file system than the old location "
-              + srcPath + ". This operation is not supported");
-    }
-    try {
-      srcFs.exists(srcPath); // check that src exists and also checks access permissions
-      if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
-        throw new InvalidOperationException("New location for this partition "
-                + tbl.getDbName() + "." + tbl.getTableName() + "." + newPartition.getValues()
-                + " already exists : " + destPath);
-      }
-    } catch (IOException e) {
-      throw new InvalidOperationException("Unable to access new location "
-              + destPath + " for partition " + tbl.getDbName() + "."
-              + tbl.getTableName() + " " + newPartition.getValues());
-    }
-  }
-
-  private Path getDestinationPathForRename(String dbName, Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
-          throws InvalidOperationException, MetaException, TException {
-    throw new TException("Not implement yet");
-    // Commend out to avoid using shim
-    // try {
-    //   Path destPath = new Path(hiveShims.getDefaultTablePath(getDatabase(dbName), tbl.getTableName(), wh),
-    //         Warehouse.makePartName(tbl.getPartitionKeys(), newPartition.getValues()));
-    //   return constructRenamedPath(destPath, new Path(newPartition.getSd().getLocation()));
-    // } catch (NoSuchObjectException e) {
-    //   throw new InvalidOperationException(
-    //         "Unable to change partition or table. Database " + dbName + " does not exist"
-    //               + " Check metastore logs for detailed stack." + e.getMessage());
-    // }
-  }
-
-  private void setDDLTime(org.apache.hadoop.hive.metastore.api.Partition partition) {
-    if (partition.getParameters() == null ||
-            partition.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
-            Integer.parseInt(partition.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
-      partition.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
-              .currentTimeMillis() / 1000));
-    }
-  }
-
-  private void renamePartitionInCatalog(String databaseName, String tableName,
-          List<String> partitionValues, org.apache.hadoop.hive.metastore.api.Partition newPartition)
-          throws InvalidOperationException, MetaException, TException {
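-    // Push the rename straight to the Glue UpdatePartition API; AWS service errors are rewrapped as Hive exceptions.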
-    try {
-      glueClient.updatePartition(
-              new UpdatePartitionRequest()
-                      .withDatabaseName(databaseName)
-                      .withTableName(tableName)
-                      .withPartitionValueList(partitionValues)
-                      .withPartitionInput(GlueInputConverter.convertToPartitionInput(newPartition)));
-    } catch (AmazonServiceException e) {
-      throw catalogToHiveConverter.wrapInHiveException(e);
-    }
-  }
-
-  @Override
-  public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
-    return glueMetastoreClientDelegate.renewDelegationToken(tokenStrForm);
-  }
-
-  @Override
-  public void rollbackTxn(long txnId) throws NoSuchTxnException, TException {
-    glueMetastoreClientDelegate.rollbackTxn(txnId);
-  }
-
-  @Override
-  public void replRollbackTxn(long l, String s) throws NoSuchTxnException, TException {
-    throw new UnsupportedOperationException("replRollbackTxn is not supported");
-  }
-
-  @Override
-  public void setMetaConf(String key, String value) throws MetaException, TException {
-    MetastoreConf.ConfVars confVar = MetastoreConf.getMetaConf(key);
-    if (confVar == null) {
-      throw new MetaException("Invalid configuration key " + key);
-    }
-    try {
-      confVar.validate(value);
-    } catch (IllegalArgumentException e) {
-      throw new MetaException("Invalid configuration value " + value + " for key " + key +
-              " by " + e.getMessage());
-    }
-    conf.set(key, value);
-  }
-
-  @Override
-  public boolean setPartitionColumnStatistics(org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest request)
-          throws NoSuchObjectException, InvalidObjectException,
-          MetaException, TException, org.apache.hadoop.hive.metastore.api.InvalidInputException {
-    return glueMetastoreClientDelegate.setPartitionColumnStatistics(request);
-  }
-
-  @Override
-  public void flushCache() {
-    // no-op
-  }
-
-  @Override
-  public Iterable<Map.Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds) throws TException {
-    return glueMetastoreClientDelegate.getFileMetadata(fileIds);
-  }
-
-  @Override
-  public Iterable<Map.Entry<Long, MetadataPpdResult>> getFileMetadataBySarg(
-          List<Long> fileIds,
-          ByteBuffer sarg,
-          boolean doGetFooters
-  ) throws TException {
-    return glueMetastoreClientDelegate.getFileMetadataBySarg(fileIds, sarg, doGetFooters);
-  }
-
-  @Override
-  public void clearFileMetadata(List<Long> fileIds) throws TException {
-    glueMetastoreClientDelegate.clearFileMetadata(fileIds);
-  }
-
-  @Override
-  public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws TException {
-    glueMetastoreClientDelegate.putFileMetadata(fileIds, metadata);
-  }
-
-  @Override
-  public boolean isSameConfObj(Configuration conf) {
-    // taken from HiveMetaStoreClient
-    return this.conf == conf;
-  }
-
-  @Override
-  public boolean cacheFileMetadata(String dbName, String tblName, String partName, boolean allParts) throws TException {
-    return glueMetastoreClientDelegate.cacheFileMetadata(dbName, tblName, partName, allParts);
-  }
-
-  @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest primaryKeysRequest) throws TException {
-    // PrimaryKeys are currently unsupported
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest foreignKeysRequest) throws TException {
-    // ForeignKeys are currently unsupported
-    // return an empty list so DESCRIBE (FORMATTED | EXTENDED) does not break
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest uniqueConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-    // Not supported, called by DESCRIBE (FORMATTED | EXTENDED)
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest notNullConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-    // Not supported, called by DESCRIBE (FORMATTED | EXTENDED)
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest defaultConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-    // Not supported, called by DESCRIBE (FORMATTED | EXTENDED)
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest checkConstraintsRequest) throws MetaException, NoSuchObjectException, TException {
-    // Not supported, called by DESCRIBE (FORMATTED | EXTENDED)
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public void createTableWithConstraints(
-          Table table,
-          List<SQLPrimaryKey> primaryKeys,
-          List<SQLForeignKey> foreignKeys,
-          List<SQLUniqueConstraint> uniqueConstraints,
-          List<SQLNotNullConstraint> notNullConstraints,
-          List<SQLDefaultConstraint> defaultConstraints,
-          List<SQLCheckConstraint> checkConstraints
-  ) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException {
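-    // Only primary and foreign keys are forwarded to the delegate; unique, not-null,
-    // default, and check constraints are silently ignored here.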
-    glueMetastoreClientDelegate.createTableWithConstraints(table, primaryKeys, foreignKeys);
-  }
-
-  @Override
-  public void dropConstraint(
-          String dbName,
-          String tblName,
-          String constraintName
-  ) throws MetaException, NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.dropConstraint(dbName, tblName, constraintName);
-  }
-
-  @Override
-  public void dropConstraint(String catalogName, String dbName, String tblName, String constraintName)
-          throws MetaException, NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.dropConstraint(dbName, tblName, constraintName);
-  }
-
-  @Override
-  public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols)
-          throws MetaException, NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.addPrimaryKey(primaryKeyCols);
-  }
-
-  @Override
-  public void addForeignKey(List<SQLForeignKey> foreignKeyCols)
-          throws MetaException, NoSuchObjectException, TException {
-    glueMetastoreClientDelegate.addForeignKey(foreignKeyCols);
-  }
-
-  @Override
-  public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws MetaException, NoSuchObjectException, TException {
-    throw new UnsupportedOperationException("addUniqueConstraint is not supported");
-  }
-
-  @Override
-  public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws MetaException, NoSuchObjectException, TException {
-    throw new UnsupportedOperationException("addNotNullConstraint is not supported");
-  }
-
-  @Override
-  public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws MetaException, NoSuchObjectException, TException {
-    throw new UnsupportedOperationException("addDefaultConstraint is not supported");
-  }
-
-  @Override
-  public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws MetaException, NoSuchObjectException, TException {
-    throw new UnsupportedOperationException("addCheckConstraint is not supported");
-  }
-
-  @Override
-  public String getMetastoreDbUuid() throws MetaException, TException {
-    throw new UnsupportedOperationException("getMetastoreDbUuid is not supported");
-  }
-
-  @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) throws InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("createResourcePlan is not supported");
-  }
-
-  @Override
-  public WMFullResourcePlan getResourcePlan(String resourcePlanName) throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("getResourcePlan is not supported");
-  }
-
-  @Override
-  public List<WMResourcePlan> getAllResourcePlans() throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("getAllResourcePlans is not supported");
-  }
-
-  @Override
-  public void dropResourcePlan(String resourcePlanName) throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("dropResourcePlan is not supported");
-  }
-
-  @Override
-  public WMFullResourcePlan alterResourcePlan(
-          String resourcePlanName,
-          WMNullableResourcePlan wmNullableResourcePlan,
-          boolean canActivateDisabled,
-          boolean isForceDeactivate,
-          boolean isReplace
-  ) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("alterResourcePlan is not supported");
-  }
-
-  @Override
-  public WMFullResourcePlan getActiveResourcePlan() throws MetaException, TException {
-    throw new UnsupportedOperationException("getActiveResourcePlan is not supported");
-  }
-
-  @Override
-  public WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("validateResourcePlan is not supported");
-  }
-
-  @Override
-  public void createWMTrigger(WMTrigger wmTrigger) throws InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("createWMTrigger is not supported");
-  }
-
-  @Override
-  public void alterWMTrigger(WMTrigger wmTrigger) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("alterWMTrigger is not supported");
-  }
-
-  @Override
-  public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("dropWMTrigger is not supported");
-  }
-
-  @Override
-  public List<WMTrigger> getTriggersForResourcePlan(String resourcePlan) throws NoSuchObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("getTriggersForResourcePlan is not supported");
-  }
-
-  @Override
-  public void createWMPool(WMPool wmPool) throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("createWMPool is not supported");
-  }
-
-  @Override
-  public void alterWMPool(WMNullablePool wmNullablePool, String poolPath) throws NoSuchObjectException, InvalidObjectException, TException {
-    throw new UnsupportedOperationException("alterWMPool is not supported");
-  }
-
-  @Override
-  public void dropWMPool(String resourcePlanName, String poolPath) throws TException {
-    throw new UnsupportedOperationException("dropWMPool is not supported");
-  }
-
-  @Override
-  public void createOrUpdateWMMapping(WMMapping wmMapping, boolean isUpdate) throws TException {
-    throw new UnsupportedOperationException("createOrUpdateWMMapping is not supported");
-  }
-
-  @Override
-  public void dropWMMapping(WMMapping wmMapping) throws TException {
-    throw new UnsupportedOperationException("dropWMMapping is not supported");
-  }
-
-  @Override
-  public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, boolean shouldDrop)
-          throws org.apache.hadoop.hive.metastore.api.AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException {
-    throw new UnsupportedOperationException("createOrDropTriggerToPoolMapping is not supported");
-  }
-
-  @Override
-  public void createISchema(ISchema iSchema) throws TException {
-    throw new UnsupportedOperationException("createISchema is not supported");
-  }
-
-  @Override
-  public void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException {
-    throw new UnsupportedOperationException("alterISchema is not supported");
-  }
-
-  @Override
-  public ISchema getISchema(String catName, String dbName, String name) throws TException {
-    throw new UnsupportedOperationException("getISchema is not supported");
-  }
-
-  @Override
-  public void dropISchema(String catName, String dbName, String name) throws TException {
-    throw new UnsupportedOperationException("dropISchema is not supported");
-  }
-
-  @Override
-  public void addSchemaVersion(SchemaVersion schemaVersion) throws TException {
-    throw new UnsupportedOperationException("addSchemaVersion is not supported");
-  }
-
-  @Override
-  public SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException {
-    throw new UnsupportedOperationException("getSchemaVersion is not supported");
-  }
-
-  @Override
-  public SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException {
-    throw new UnsupportedOperationException("getSchemaLatestVersion is not supported");
-  }
-
-  @Override
-  public List<SchemaVersion> getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException {
-    throw new UnsupportedOperationException("getSchemaAllVersions is not supported");
-  }
-
-  @Override
-  public void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException {
-    throw new UnsupportedOperationException("dropSchemaVersion is not supported");
-  }
-
-  @Override
-  public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst findSchemasByColsRqst) throws TException {
-    throw new UnsupportedOperationException("getSchemaByCols is not supported");
-  }
-
-  @Override
-  public void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException {
-    throw new UnsupportedOperationException("mapSchemaVersionToSerde is not supported");
-  }
-
-  @Override
-  public void setSchemaVersionState(String catName, String dbName, String schemaName, int version, SchemaVersionState state) throws TException {
-    throw new UnsupportedOperationException("setSchemaVersionState is not supported");
-  }
-
-  @Override
-  public void addSerDe(SerDeInfo serDeInfo) throws TException {
-    throw new UnsupportedOperationException("addSerDe is not supported");
-  }
-
-  @Override
-  public SerDeInfo getSerDe(String serDeName) throws TException {
-    throw new UnsupportedOperationException("getSerDe is not supported");
-  }
-
-  @Override
-  public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException {
-    throw new UnsupportedOperationException("lockMaterializationRebuild is not supported");
-  }
-
-  @Override
-  public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException {
-    throw new UnsupportedOperationException("heartbeatLockMaterializationRebuild is not supported");
-  }
-
-  @Override
-  public void addRuntimeStat(RuntimeStat runtimeStat) throws TException {
-    throw new UnsupportedOperationException("addRuntimeStat is not supported");
-  }
-
-  @Override
-  public List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException {
-    throw new UnsupportedOperationException("getRuntimeStats is not supported");
-  }
-
-  @Override
-  public ShowCompactResponse showCompactions() throws TException {
-    return glueMetastoreClientDelegate.showCompactions();
-  }
-
-  @Override
-  public void addDynamicPartitions(
-          long txnId,
-          long writeId,
-          String dbName,
-          String tblName,
-          List<String> partNames
-  ) throws TException {
-    glueMetastoreClientDelegate.addDynamicPartitions(txnId, dbName, tblName, partNames);
-  }
-
-  @Override
-  public void addDynamicPartitions(
-          long txnId,
-          long writeId,
-          String dbName,
-          String tblName,
-          List<String> partNames,
-          DataOperationType operationType
-  ) throws TException {
-    glueMetastoreClientDelegate.addDynamicPartitions(txnId, dbName, tblName, partNames, operationType);
-  }
-
-  @Override
-  public void insertTable(Table table, boolean overwrite) throws MetaException {
-    glueMetastoreClientDelegate.insertTable(table, overwrite);
-  }
-
-  @Override
-  public NotificationEventResponse getNextNotification(
-          long lastEventId, int maxEvents, NotificationFilter notificationFilter) throws TException {
-    // Unsupported, workaround for HS2's notification poll.
-    return new NotificationEventResponse();
-  }
-
-  @Override
-  public CurrentNotificationEventId getCurrentNotificationEventId() throws TException {
-    // Unsupported, workaround for HS2's notification poll.
-    return new CurrentNotificationEventId(0);
-  }
-
-  @Override
-  public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest notificationEventsCountRequest) throws TException {
-    throw new UnsupportedOperationException("getNotificationEventsCount is not supported");
-  }
-
-  @Override
-  public FireEventResponse fireListenerEvent(FireEventRequest fireEventRequest) throws TException {
-    return glueMetastoreClientDelegate.fireListenerEvent(fireEventRequest);
-  }
-
-  @Override
-  public ShowLocksResponse showLocks() throws TException {
-    return glueMetastoreClientDelegate.showLocks();
-  }
-
-  @Override
-  public ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException {
-    return glueMetastoreClientDelegate.showLocks(showLocksRequest);
-  }
-
-  @Override
-  public GetOpenTxnsInfoResponse showTxns() throws TException {
-    return glueMetastoreClientDelegate.showTxns();
-  }
-
-  @Deprecated
-  public boolean tableExists(String tableName) throws MetaException, TException, UnknownDBException {
-    // Deprecated: assumes the default database; use tableExists(dbName, tableName) instead.
-    return tableExists(DEFAULT_DATABASE_NAME, tableName);
-  }
-
-  @Override
-  public boolean tableExists(String databaseName, String tableName) throws MetaException, TException,
-          UnknownDBException {
-    return glueMetastoreClientDelegate.tableExists(databaseName, tableName);
-  }
-
-  @Override
-  public boolean tableExists(String catalogName, String databaseName, String tableName)
-          throws MetaException, TException, UnknownDBException {
-    return glueMetastoreClientDelegate.tableExists(databaseName, tableName);
-  }
-
-  @Override
-  public void unlock(long lockId) throws NoSuchLockException, TxnOpenException, TException {
-    glueMetastoreClientDelegate.unlock(lockId);
-  }
-
-  @Override
-  public boolean updatePartitionColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics columnStatistics)
-          throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
-          org.apache.hadoop.hive.metastore.api.InvalidInputException {
-    return glueMetastoreClientDelegate.updatePartitionColumnStatistics(columnStatistics);
-  }
-
-  @Override
-  public boolean updateTableColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics columnStatistics)
-          throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
-          org.apache.hadoop.hive.metastore.api.InvalidInputException {
-    return glueMetastoreClientDelegate.updateTableColumnStatistics(columnStatistics);
-  }
-
-  @Override
-  public void validatePartitionNameCharacters(List<String> part_vals) throws TException, MetaException {
-    try {
-      String partitionValidationRegex =
-              MetastoreConf.getVar(conf, MetastoreConf.ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
-      Pattern partitionValidationPattern = Strings.isNullOrEmpty(partitionValidationRegex) ? null
-              : Pattern.compile(partitionValidationRegex);
-      MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
-    } catch (Exception e){
-      if (e instanceof MetaException) {
-        throw (MetaException) e;
-      } else {
-        throw new MetaException(e.getMessage());
-      }
-    }
-  }
-
-  private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
-    URI currentUri = currentPath.toUri();
-
-    return new Path(currentUri.getScheme(), currentUri.getAuthority(),
-            defaultNewPath.toUri().getPath());
-  }
-
-}
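
The validatePartitionNameCharacters override above is one of the few methods in this client with local logic: it reads the metastore's PARTITION_NAME_WHITELIST_PATTERN, compiles it, and rejects partition values that fail to match. A minimal standalone sketch of that whitelist check (PartitionNameValidator is a hypothetical name chosen for illustration; the deleted code delegates the actual check to MetaStoreUtils.validatePartitionNameCharacters):

    import java.util.List;
    import java.util.regex.Pattern;

    // Hypothetical sketch of the whitelist check; a null/empty pattern means
    // no validation is enforced, matching the deleted code's behavior.
    public final class PartitionNameValidator {
        private final Pattern whitelist;

        public PartitionNameValidator(String whitelistRegex) {
            this.whitelist = (whitelistRegex == null || whitelistRegex.isEmpty())
                    ? null : Pattern.compile(whitelistRegex);
        }

        public void validate(List<String> partVals) {
            if (whitelist == null) {
                return; // no pattern configured, nothing to enforce
            }
            for (String val : partVals) {
                if (!whitelist.matcher(val).matches()) {
                    throw new IllegalArgumentException("Partition value '" + val
                            + "' does not match whitelist pattern " + whitelist.pattern());
                }
            }
        }
    }
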
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCredentialsProviderFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCredentialsProviderFactory.java
deleted file mode 100644
index 41444078e4..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCredentialsProviderFactory.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-
-import com.amazonaws.auth.AWSCredentialsProvider;
-
-public interface AWSCredentialsProviderFactory {
-
-  AWSCredentialsProvider buildAWSCredentialsProvider(Configuration conf);
-}
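
AWSCredentialsProviderFactory is the pluggability seam used by AWSGlueClientFactory below: the class named by AWS_CATALOG_CREDENTIALS_PROVIDER_FACTORY_CLASS is instantiated reflectively and asked to turn a Hadoop Configuration into an AWSCredentialsProvider. A minimal sketch of a custom implementation, assuming static keys stored under made-up configuration key names:

    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import org.apache.hadoop.conf.Configuration;

    public class StaticKeyCredentialsProviderFactory implements AWSCredentialsProviderFactory {

        @Override
        public AWSCredentialsProvider buildAWSCredentialsProvider(Configuration conf) {
            // "example.glue.access-key" / "example.glue.secret-key" are hypothetical
            // key names chosen for this sketch, not keys defined by the library.
            String accessKey = conf.get("example.glue.access-key");
            String secretKey = conf.get("example.glue.secret-key");
            return new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
        }
    }
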
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueClientFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueClientFactory.java
deleted file mode 100644
index 72e75a891f..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueClientFactory.java
+++ /dev/null
@@ -1,157 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.regions.Region;
-import com.amazonaws.regions.Regions;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.AWSGlueClientBuilder;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.log4j.Logger;
-
-import java.io.IOException;
-
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_CATALOG_CREDENTIALS_PROVIDER_FACTORY_CLASS;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_CATALOG_SEPARATOR;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_CONNECTION_TIMEOUT;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_ENDPOINT;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_MAX_CONNECTIONS;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_MAX_RETRY;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_SOCKET_TIMEOUT;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_REGION;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.DEFAULT_CONNECTION_TIMEOUT;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.DEFAULT_MAX_CONNECTIONS;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.DEFAULT_MAX_RETRY;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.DEFAULT_SOCKET_TIMEOUT;
-
-public final class AWSGlueClientFactory implements GlueClientFactory {
-
-  private static final Logger logger = Logger.getLogger(AWSGlueClientFactory.class);
-
-  private final Configuration conf;
-
-  public AWSGlueClientFactory(Configuration conf) {
-    Preconditions.checkNotNull(conf, "Configuration cannot be null");
-    this.conf = conf;
-  }
-
-  @Override
-  public AWSGlue newClient() throws MetaException {
-    try {
-      AWSGlueClientBuilder glueClientBuilder = AWSGlueClientBuilder.standard()
-              .withCredentials(getAWSCredentialsProvider(conf));
-
-      String regionStr = getProperty(AWS_REGION, conf);
-      String glueEndpoint = getProperty(AWS_GLUE_ENDPOINT, conf);
-
-      // ClientBuilder only allows one of EndpointConfiguration or Region to be set
-      if (StringUtils.isNotBlank(glueEndpoint)) {
-        logger.info("Setting glue service endpoint to " + glueEndpoint);
-        glueClientBuilder.setEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(glueEndpoint, null));
-      } else if (StringUtils.isNotBlank(regionStr)) {
-        logger.info("Setting region to : " + regionStr);
-        glueClientBuilder.setRegion(regionStr);
-      } else {
-        Region currentRegion = Regions.getCurrentRegion();
-        if (currentRegion != null) {
-          logger.info("Using region from ec2 metadata : " + currentRegion.getName());
-          glueClientBuilder.setRegion(currentRegion.getName());
-        } else {
-          logger.info("No region info found, using SDK default region: us-east-1");
-        }
-      }
-
-      glueClientBuilder.setClientConfiguration(buildClientConfiguration(conf));
-      return decorateGlueClient(glueClientBuilder.build());
-    } catch (Exception e) {
-      String message = "Unable to build AWSGlueClient: " + e;
-      logger.error(message, e);
-      throw new MetaException(message);
-    }
-  }
-
-  private AWSGlue decorateGlueClient(AWSGlue originalGlueClient) {
-    if (Strings.isNullOrEmpty(getProperty(AWS_GLUE_CATALOG_SEPARATOR, conf))) {
-      return originalGlueClient;
-    }
-    return new AWSGlueMultipleCatalogDecorator(
-            originalGlueClient,
-            getProperty(AWS_GLUE_CATALOG_SEPARATOR, conf));
-  }
-
-  @VisibleForTesting
-  AWSCredentialsProvider getAWSCredentialsProvider(Configuration conf) {
-
-    Class<? extends AWSCredentialsProviderFactory> providerFactoryClass = conf
-            .getClass(AWS_CATALOG_CREDENTIALS_PROVIDER_FACTORY_CLASS,
-                    DefaultAWSCredentialsProviderFactory.class).asSubclass(
-                    AWSCredentialsProviderFactory.class);
-    AWSCredentialsProviderFactory provider = ReflectionUtils.newInstance(
-            providerFactoryClass, conf);
-    return provider.buildAWSCredentialsProvider(conf);
-  }
-
-  private String createUserAgent() {
-    try {
-      String ugi = UserGroupInformation.getCurrentUser().getUserName();
-      return "ugi=" + ugi;
-    } catch (IOException e) {
-      /*
-       * An IOException here means that the login failed according
-       * to UserGroupInformation.getCurrentUser(). In this case,
-       * we throw a RuntimeException, the same way
-       * HiveMetaStoreClient.java does.
-       * If the IOException were not caught, the build would fail
-       * with an unreported checked exception (IOException).
-       */
-      logger.error("Unable to resolve current user name " + e.getMessage());
-      throw new RuntimeException(e);
-    }
-  }
-
-  private ClientConfiguration buildClientConfiguration(Configuration conf) {
-    // Pass the UserAgent to the client configuration, which enables CloudTrail to audit UGI info
-    // when using the Glue Catalog as the metastore
-    ClientConfiguration clientConfiguration = new ClientConfiguration()
-            .withUserAgent(createUserAgent())
-            .withMaxErrorRetry(conf.getInt(AWS_GLUE_MAX_RETRY, DEFAULT_MAX_RETRY))
-            .withMaxConnections(conf.getInt(AWS_GLUE_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS))
-            .withConnectionTimeout(conf.getInt(AWS_GLUE_CONNECTION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT))
-            .withSocketTimeout(conf.getInt(AWS_GLUE_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));
-    return clientConfiguration;
-  }
-
-  private static String getProperty(String propertyName, Configuration conf) {
-    return Strings.isNullOrEmpty(System.getProperty(propertyName)) ?
-            conf.get(propertyName) : System.getProperty(propertyName);
-  }
-}
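
Two details in the factory above are easy to miss: getProperty lets a JVM system property override the Hadoop configuration for keys such as AWS_REGION and AWS_GLUE_ENDPOINT, and newClient falls back to the EC2 metadata region (and ultimately the SDK default) when neither is set. A minimal usage sketch, assuming AWS credentials are resolvable from the environment:

    import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_REGION;

    import com.amazonaws.glue.catalog.metastore.AWSGlueClientFactory;
    import com.amazonaws.services.glue.AWSGlue;
    import com.amazonaws.services.glue.model.GetDatabasesRequest;
    import org.apache.hadoop.conf.Configuration;

    public class GlueClientFactoryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Setting the same key as a JVM system property would take precedence.
            conf.set(AWS_REGION, "us-east-1");

            AWSGlue glue = new AWSGlueClientFactory(conf).newClient();
            // Smoke test: list the databases visible in the Glue Data Catalog.
            System.out.println(glue.getDatabases(new GetDatabasesRequest()).getDatabaseList());
        }
    }
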
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java
deleted file mode 100644
index 595b21ab5d..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java
+++ /dev/null
@@ -1,1343 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.AmazonWebServiceRequest;
-import com.amazonaws.ResponseMetadata;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.BatchCreatePartitionRequest;
-import com.amazonaws.services.glue.model.BatchCreatePartitionResult;
-import com.amazonaws.services.glue.model.BatchDeleteConnectionRequest;
-import com.amazonaws.services.glue.model.BatchDeleteConnectionResult;
-import com.amazonaws.services.glue.model.BatchDeletePartitionRequest;
-import com.amazonaws.services.glue.model.BatchDeletePartitionResult;
-import com.amazonaws.services.glue.model.BatchDeleteTableRequest;
-import com.amazonaws.services.glue.model.BatchDeleteTableResult;
-import com.amazonaws.services.glue.model.BatchDeleteTableVersionRequest;
-import com.amazonaws.services.glue.model.BatchDeleteTableVersionResult;
-import com.amazonaws.services.glue.model.BatchGetBlueprintsRequest;
-import com.amazonaws.services.glue.model.BatchGetBlueprintsResult;
-import com.amazonaws.services.glue.model.BatchGetCrawlersRequest;
-import com.amazonaws.services.glue.model.BatchGetCrawlersResult;
-import com.amazonaws.services.glue.model.BatchGetCustomEntityTypesRequest;
-import com.amazonaws.services.glue.model.BatchGetCustomEntityTypesResult;
-import com.amazonaws.services.glue.model.BatchGetDevEndpointsRequest;
-import com.amazonaws.services.glue.model.BatchGetDevEndpointsResult;
-import com.amazonaws.services.glue.model.BatchGetJobsRequest;
-import com.amazonaws.services.glue.model.BatchGetJobsResult;
-import com.amazonaws.services.glue.model.BatchGetPartitionRequest;
-import com.amazonaws.services.glue.model.BatchGetPartitionResult;
-import com.amazonaws.services.glue.model.BatchGetTriggersRequest;
-import com.amazonaws.services.glue.model.BatchGetTriggersResult;
-import com.amazonaws.services.glue.model.BatchGetWorkflowsRequest;
-import com.amazonaws.services.glue.model.BatchGetWorkflowsResult;
-import com.amazonaws.services.glue.model.BatchStopJobRunRequest;
-import com.amazonaws.services.glue.model.BatchStopJobRunResult;
-import com.amazonaws.services.glue.model.BatchUpdatePartitionRequest;
-import com.amazonaws.services.glue.model.BatchUpdatePartitionResult;
-import com.amazonaws.services.glue.model.CancelMLTaskRunRequest;
-import com.amazonaws.services.glue.model.CancelMLTaskRunResult;
-import com.amazonaws.services.glue.model.CancelStatementRequest;
-import com.amazonaws.services.glue.model.CancelStatementResult;
-import com.amazonaws.services.glue.model.CheckSchemaVersionValidityRequest;
-import com.amazonaws.services.glue.model.CheckSchemaVersionValidityResult;
-import com.amazonaws.services.glue.model.CreateBlueprintRequest;
-import com.amazonaws.services.glue.model.CreateBlueprintResult;
-import com.amazonaws.services.glue.model.CreateClassifierRequest;
-import com.amazonaws.services.glue.model.CreateClassifierResult;
-import com.amazonaws.services.glue.model.CreateConnectionRequest;
-import com.amazonaws.services.glue.model.CreateConnectionResult;
-import com.amazonaws.services.glue.model.CreateCrawlerRequest;
-import com.amazonaws.services.glue.model.CreateCrawlerResult;
-import com.amazonaws.services.glue.model.CreateCustomEntityTypeRequest;
-import com.amazonaws.services.glue.model.CreateCustomEntityTypeResult;
-import com.amazonaws.services.glue.model.CreateDatabaseRequest;
-import com.amazonaws.services.glue.model.CreateDatabaseResult;
-import com.amazonaws.services.glue.model.CreateDevEndpointRequest;
-import com.amazonaws.services.glue.model.CreateDevEndpointResult;
-import com.amazonaws.services.glue.model.CreateJobRequest;
-import com.amazonaws.services.glue.model.CreateJobResult;
-import com.amazonaws.services.glue.model.CreateMLTransformRequest;
-import com.amazonaws.services.glue.model.CreateMLTransformResult;
-import com.amazonaws.services.glue.model.CreatePartitionIndexRequest;
-import com.amazonaws.services.glue.model.CreatePartitionIndexResult;
-import com.amazonaws.services.glue.model.CreatePartitionRequest;
-import com.amazonaws.services.glue.model.CreatePartitionResult;
-import com.amazonaws.services.glue.model.CreateRegistryRequest;
-import com.amazonaws.services.glue.model.CreateRegistryResult;
-import com.amazonaws.services.glue.model.CreateSchemaRequest;
-import com.amazonaws.services.glue.model.CreateSchemaResult;
-import com.amazonaws.services.glue.model.CreateScriptRequest;
-import com.amazonaws.services.glue.model.CreateScriptResult;
-import com.amazonaws.services.glue.model.CreateSecurityConfigurationRequest;
-import com.amazonaws.services.glue.model.CreateSecurityConfigurationResult;
-import com.amazonaws.services.glue.model.CreateSessionRequest;
-import com.amazonaws.services.glue.model.CreateSessionResult;
-import com.amazonaws.services.glue.model.CreateTableRequest;
-import com.amazonaws.services.glue.model.CreateTableResult;
-import com.amazonaws.services.glue.model.CreateTriggerRequest;
-import com.amazonaws.services.glue.model.CreateTriggerResult;
-import com.amazonaws.services.glue.model.CreateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.CreateUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.CreateWorkflowRequest;
-import com.amazonaws.services.glue.model.CreateWorkflowResult;
-import com.amazonaws.services.glue.model.DeleteBlueprintRequest;
-import com.amazonaws.services.glue.model.DeleteBlueprintResult;
-import com.amazonaws.services.glue.model.DeleteClassifierRequest;
-import com.amazonaws.services.glue.model.DeleteClassifierResult;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForPartitionResult;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForTableResult;
-import com.amazonaws.services.glue.model.DeleteConnectionRequest;
-import com.amazonaws.services.glue.model.DeleteConnectionResult;
-import com.amazonaws.services.glue.model.DeleteCrawlerRequest;
-import com.amazonaws.services.glue.model.DeleteCrawlerResult;
-import com.amazonaws.services.glue.model.DeleteCustomEntityTypeRequest;
-import com.amazonaws.services.glue.model.DeleteCustomEntityTypeResult;
-import com.amazonaws.services.glue.model.DeleteDatabaseRequest;
-import com.amazonaws.services.glue.model.DeleteDatabaseResult;
-import com.amazonaws.services.glue.model.DeleteDevEndpointRequest;
-import com.amazonaws.services.glue.model.DeleteDevEndpointResult;
-import com.amazonaws.services.glue.model.DeleteJobRequest;
-import com.amazonaws.services.glue.model.DeleteJobResult;
-import com.amazonaws.services.glue.model.DeleteMLTransformRequest;
-import com.amazonaws.services.glue.model.DeleteMLTransformResult;
-import com.amazonaws.services.glue.model.DeletePartitionIndexRequest;
-import com.amazonaws.services.glue.model.DeletePartitionIndexResult;
-import com.amazonaws.services.glue.model.DeletePartitionRequest;
-import com.amazonaws.services.glue.model.DeletePartitionResult;
-import com.amazonaws.services.glue.model.DeleteRegistryRequest;
-import com.amazonaws.services.glue.model.DeleteRegistryResult;
-import com.amazonaws.services.glue.model.DeleteResourcePolicyRequest;
-import com.amazonaws.services.glue.model.DeleteResourcePolicyResult;
-import com.amazonaws.services.glue.model.DeleteSchemaRequest;
-import com.amazonaws.services.glue.model.DeleteSchemaResult;
-import com.amazonaws.services.glue.model.DeleteSchemaVersionsRequest;
-import com.amazonaws.services.glue.model.DeleteSchemaVersionsResult;
-import com.amazonaws.services.glue.model.DeleteSecurityConfigurationRequest;
-import com.amazonaws.services.glue.model.DeleteSecurityConfigurationResult;
-import com.amazonaws.services.glue.model.DeleteSessionRequest;
-import com.amazonaws.services.glue.model.DeleteSessionResult;
-import com.amazonaws.services.glue.model.DeleteTableRequest;
-import com.amazonaws.services.glue.model.DeleteTableResult;
-import com.amazonaws.services.glue.model.DeleteTableVersionRequest;
-import com.amazonaws.services.glue.model.DeleteTableVersionResult;
-import com.amazonaws.services.glue.model.DeleteTriggerRequest;
-import com.amazonaws.services.glue.model.DeleteTriggerResult;
-import com.amazonaws.services.glue.model.DeleteUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.DeleteUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.DeleteWorkflowRequest;
-import com.amazonaws.services.glue.model.DeleteWorkflowResult;
-import com.amazonaws.services.glue.model.GetBlueprintRequest;
-import com.amazonaws.services.glue.model.GetBlueprintResult;
-import com.amazonaws.services.glue.model.GetBlueprintRunRequest;
-import com.amazonaws.services.glue.model.GetBlueprintRunResult;
-import com.amazonaws.services.glue.model.GetBlueprintRunsRequest;
-import com.amazonaws.services.glue.model.GetBlueprintRunsResult;
-import com.amazonaws.services.glue.model.GetCatalogImportStatusRequest;
-import com.amazonaws.services.glue.model.GetCatalogImportStatusResult;
-import com.amazonaws.services.glue.model.GetClassifierRequest;
-import com.amazonaws.services.glue.model.GetClassifierResult;
-import com.amazonaws.services.glue.model.GetClassifiersRequest;
-import com.amazonaws.services.glue.model.GetClassifiersResult;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForPartitionResult;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForTableResult;
-import com.amazonaws.services.glue.model.GetConnectionRequest;
-import com.amazonaws.services.glue.model.GetConnectionResult;
-import com.amazonaws.services.glue.model.GetConnectionsRequest;
-import com.amazonaws.services.glue.model.GetConnectionsResult;
-import com.amazonaws.services.glue.model.GetCrawlerMetricsRequest;
-import com.amazonaws.services.glue.model.GetCrawlerMetricsResult;
-import com.amazonaws.services.glue.model.GetCrawlerRequest;
-import com.amazonaws.services.glue.model.GetCrawlerResult;
-import com.amazonaws.services.glue.model.GetCrawlersRequest;
-import com.amazonaws.services.glue.model.GetCrawlersResult;
-import com.amazonaws.services.glue.model.GetCustomEntityTypeRequest;
-import com.amazonaws.services.glue.model.GetCustomEntityTypeResult;
-import com.amazonaws.services.glue.model.GetDataCatalogEncryptionSettingsRequest;
-import com.amazonaws.services.glue.model.GetDataCatalogEncryptionSettingsResult;
-import com.amazonaws.services.glue.model.GetDatabaseRequest;
-import com.amazonaws.services.glue.model.GetDatabaseResult;
-import com.amazonaws.services.glue.model.GetDatabasesRequest;
-import com.amazonaws.services.glue.model.GetDatabasesResult;
-import com.amazonaws.services.glue.model.GetDataflowGraphRequest;
-import com.amazonaws.services.glue.model.GetDataflowGraphResult;
-import com.amazonaws.services.glue.model.GetDevEndpointRequest;
-import com.amazonaws.services.glue.model.GetDevEndpointResult;
-import com.amazonaws.services.glue.model.GetDevEndpointsRequest;
-import com.amazonaws.services.glue.model.GetDevEndpointsResult;
-import com.amazonaws.services.glue.model.GetJobBookmarkRequest;
-import com.amazonaws.services.glue.model.GetJobBookmarkResult;
-import com.amazonaws.services.glue.model.GetJobRequest;
-import com.amazonaws.services.glue.model.GetJobResult;
-import com.amazonaws.services.glue.model.GetJobRunRequest;
-import com.amazonaws.services.glue.model.GetJobRunResult;
-import com.amazonaws.services.glue.model.GetJobRunsRequest;
-import com.amazonaws.services.glue.model.GetJobRunsResult;
-import com.amazonaws.services.glue.model.GetJobsRequest;
-import com.amazonaws.services.glue.model.GetJobsResult;
-import com.amazonaws.services.glue.model.GetMLTaskRunRequest;
-import com.amazonaws.services.glue.model.GetMLTaskRunResult;
-import com.amazonaws.services.glue.model.GetMLTaskRunsRequest;
-import com.amazonaws.services.glue.model.GetMLTaskRunsResult;
-import com.amazonaws.services.glue.model.GetMLTransformRequest;
-import com.amazonaws.services.glue.model.GetMLTransformResult;
-import com.amazonaws.services.glue.model.GetMLTransformsRequest;
-import com.amazonaws.services.glue.model.GetMLTransformsResult;
-import com.amazonaws.services.glue.model.GetMappingRequest;
-import com.amazonaws.services.glue.model.GetMappingResult;
-import com.amazonaws.services.glue.model.GetPartitionIndexesRequest;
-import com.amazonaws.services.glue.model.GetPartitionIndexesResult;
-import com.amazonaws.services.glue.model.GetPartitionRequest;
-import com.amazonaws.services.glue.model.GetPartitionResult;
-import com.amazonaws.services.glue.model.GetPartitionsRequest;
-import com.amazonaws.services.glue.model.GetPartitionsResult;
-import com.amazonaws.services.glue.model.GetPlanRequest;
-import com.amazonaws.services.glue.model.GetPlanResult;
-import com.amazonaws.services.glue.model.GetRegistryRequest;
-import com.amazonaws.services.glue.model.GetRegistryResult;
-import com.amazonaws.services.glue.model.GetResourcePoliciesRequest;
-import com.amazonaws.services.glue.model.GetResourcePoliciesResult;
-import com.amazonaws.services.glue.model.GetResourcePolicyRequest;
-import com.amazonaws.services.glue.model.GetResourcePolicyResult;
-import com.amazonaws.services.glue.model.GetSchemaByDefinitionRequest;
-import com.amazonaws.services.glue.model.GetSchemaByDefinitionResult;
-import com.amazonaws.services.glue.model.GetSchemaRequest;
-import com.amazonaws.services.glue.model.GetSchemaResult;
-import com.amazonaws.services.glue.model.GetSchemaVersionRequest;
-import com.amazonaws.services.glue.model.GetSchemaVersionResult;
-import com.amazonaws.services.glue.model.GetSchemaVersionsDiffRequest;
-import com.amazonaws.services.glue.model.GetSchemaVersionsDiffResult;
-import com.amazonaws.services.glue.model.GetSecurityConfigurationRequest;
-import com.amazonaws.services.glue.model.GetSecurityConfigurationResult;
-import com.amazonaws.services.glue.model.GetSecurityConfigurationsRequest;
-import com.amazonaws.services.glue.model.GetSecurityConfigurationsResult;
-import com.amazonaws.services.glue.model.GetSessionRequest;
-import com.amazonaws.services.glue.model.GetSessionResult;
-import com.amazonaws.services.glue.model.GetStatementRequest;
-import com.amazonaws.services.glue.model.GetStatementResult;
-import com.amazonaws.services.glue.model.GetTableRequest;
-import com.amazonaws.services.glue.model.GetTableResult;
-import com.amazonaws.services.glue.model.GetTableVersionRequest;
-import com.amazonaws.services.glue.model.GetTableVersionResult;
-import com.amazonaws.services.glue.model.GetTableVersionsRequest;
-import com.amazonaws.services.glue.model.GetTableVersionsResult;
-import com.amazonaws.services.glue.model.GetTablesRequest;
-import com.amazonaws.services.glue.model.GetTablesResult;
-import com.amazonaws.services.glue.model.GetTagsRequest;
-import com.amazonaws.services.glue.model.GetTagsResult;
-import com.amazonaws.services.glue.model.GetTriggerRequest;
-import com.amazonaws.services.glue.model.GetTriggerResult;
-import com.amazonaws.services.glue.model.GetTriggersRequest;
-import com.amazonaws.services.glue.model.GetTriggersResult;
-import com.amazonaws.services.glue.model.GetUnfilteredPartitionMetadataRequest;
-import com.amazonaws.services.glue.model.GetUnfilteredPartitionMetadataResult;
-import com.amazonaws.services.glue.model.GetUnfilteredPartitionsMetadataRequest;
-import com.amazonaws.services.glue.model.GetUnfilteredPartitionsMetadataResult;
-import com.amazonaws.services.glue.model.GetUnfilteredTableMetadataRequest;
-import com.amazonaws.services.glue.model.GetUnfilteredTableMetadataResult;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsResult;
-import com.amazonaws.services.glue.model.GetWorkflowRequest;
-import com.amazonaws.services.glue.model.GetWorkflowResult;
-import com.amazonaws.services.glue.model.GetWorkflowRunPropertiesRequest;
-import com.amazonaws.services.glue.model.GetWorkflowRunPropertiesResult;
-import com.amazonaws.services.glue.model.GetWorkflowRunRequest;
-import com.amazonaws.services.glue.model.GetWorkflowRunResult;
-import com.amazonaws.services.glue.model.GetWorkflowRunsRequest;
-import com.amazonaws.services.glue.model.GetWorkflowRunsResult;
-import com.amazonaws.services.glue.model.ImportCatalogToGlueRequest;
-import com.amazonaws.services.glue.model.ImportCatalogToGlueResult;
-import com.amazonaws.services.glue.model.ListBlueprintsRequest;
-import com.amazonaws.services.glue.model.ListBlueprintsResult;
-import com.amazonaws.services.glue.model.ListCrawlersRequest;
-import com.amazonaws.services.glue.model.ListCrawlersResult;
-import com.amazonaws.services.glue.model.ListCrawlsRequest;
-import com.amazonaws.services.glue.model.ListCrawlsResult;
-import com.amazonaws.services.glue.model.ListCustomEntityTypesRequest;
-import com.amazonaws.services.glue.model.ListCustomEntityTypesResult;
-import com.amazonaws.services.glue.model.ListDevEndpointsRequest;
-import com.amazonaws.services.glue.model.ListDevEndpointsResult;
-import com.amazonaws.services.glue.model.ListJobsRequest;
-import com.amazonaws.services.glue.model.ListJobsResult;
-import com.amazonaws.services.glue.model.ListMLTransformsRequest;
-import com.amazonaws.services.glue.model.ListMLTransformsResult;
-import com.amazonaws.services.glue.model.ListRegistriesRequest;
-import com.amazonaws.services.glue.model.ListRegistriesResult;
-import com.amazonaws.services.glue.model.ListSchemaVersionsRequest;
-import com.amazonaws.services.glue.model.ListSchemaVersionsResult;
-import com.amazonaws.services.glue.model.ListSchemasRequest;
-import com.amazonaws.services.glue.model.ListSchemasResult;
-import com.amazonaws.services.glue.model.ListSessionsRequest;
-import com.amazonaws.services.glue.model.ListSessionsResult;
-import com.amazonaws.services.glue.model.ListStatementsRequest;
-import com.amazonaws.services.glue.model.ListStatementsResult;
-import com.amazonaws.services.glue.model.ListTriggersRequest;
-import com.amazonaws.services.glue.model.ListTriggersResult;
-import com.amazonaws.services.glue.model.ListWorkflowsRequest;
-import com.amazonaws.services.glue.model.ListWorkflowsResult;
-import com.amazonaws.services.glue.model.PutDataCatalogEncryptionSettingsRequest;
-import com.amazonaws.services.glue.model.PutDataCatalogEncryptionSettingsResult;
-import com.amazonaws.services.glue.model.PutResourcePolicyRequest;
-import com.amazonaws.services.glue.model.PutResourcePolicyResult;
-import com.amazonaws.services.glue.model.PutSchemaVersionMetadataRequest;
-import com.amazonaws.services.glue.model.PutSchemaVersionMetadataResult;
-import com.amazonaws.services.glue.model.PutWorkflowRunPropertiesRequest;
-import com.amazonaws.services.glue.model.PutWorkflowRunPropertiesResult;
-import com.amazonaws.services.glue.model.QuerySchemaVersionMetadataRequest;
-import com.amazonaws.services.glue.model.QuerySchemaVersionMetadataResult;
-import com.amazonaws.services.glue.model.RegisterSchemaVersionRequest;
-import com.amazonaws.services.glue.model.RegisterSchemaVersionResult;
-import com.amazonaws.services.glue.model.RemoveSchemaVersionMetadataRequest;
-import com.amazonaws.services.glue.model.RemoveSchemaVersionMetadataResult;
-import com.amazonaws.services.glue.model.ResetJobBookmarkRequest;
-import com.amazonaws.services.glue.model.ResetJobBookmarkResult;
-import com.amazonaws.services.glue.model.ResumeWorkflowRunRequest;
-import com.amazonaws.services.glue.model.ResumeWorkflowRunResult;
-import com.amazonaws.services.glue.model.RunStatementRequest;
-import com.amazonaws.services.glue.model.RunStatementResult;
-import com.amazonaws.services.glue.model.SearchTablesRequest;
-import com.amazonaws.services.glue.model.SearchTablesResult;
-import com.amazonaws.services.glue.model.StartBlueprintRunRequest;
-import com.amazonaws.services.glue.model.StartBlueprintRunResult;
-import com.amazonaws.services.glue.model.StartCrawlerRequest;
-import com.amazonaws.services.glue.model.StartCrawlerResult;
-import com.amazonaws.services.glue.model.StartCrawlerScheduleRequest;
-import com.amazonaws.services.glue.model.StartCrawlerScheduleResult;
-import com.amazonaws.services.glue.model.StartExportLabelsTaskRunRequest;
-import com.amazonaws.services.glue.model.StartExportLabelsTaskRunResult;
-import com.amazonaws.services.glue.model.StartImportLabelsTaskRunRequest;
-import com.amazonaws.services.glue.model.StartImportLabelsTaskRunResult;
-import com.amazonaws.services.glue.model.StartJobRunRequest;
-import com.amazonaws.services.glue.model.StartJobRunResult;
-import com.amazonaws.services.glue.model.StartMLEvaluationTaskRunRequest;
-import com.amazonaws.services.glue.model.StartMLEvaluationTaskRunResult;
-import com.amazonaws.services.glue.model.StartMLLabelingSetGenerationTaskRunRequest;
-import com.amazonaws.services.glue.model.StartMLLabelingSetGenerationTaskRunResult;
-import com.amazonaws.services.glue.model.StartTriggerRequest;
-import com.amazonaws.services.glue.model.StartTriggerResult;
-import com.amazonaws.services.glue.model.StartWorkflowRunRequest;
-import com.amazonaws.services.glue.model.StartWorkflowRunResult;
-import com.amazonaws.services.glue.model.StopCrawlerRequest;
-import com.amazonaws.services.glue.model.StopCrawlerResult;
-import com.amazonaws.services.glue.model.StopCrawlerScheduleRequest;
-import com.amazonaws.services.glue.model.StopCrawlerScheduleResult;
-import com.amazonaws.services.glue.model.StopSessionRequest;
-import com.amazonaws.services.glue.model.StopSessionResult;
-import com.amazonaws.services.glue.model.StopTriggerRequest;
-import com.amazonaws.services.glue.model.StopTriggerResult;
-import com.amazonaws.services.glue.model.StopWorkflowRunRequest;
-import com.amazonaws.services.glue.model.StopWorkflowRunResult;
-import com.amazonaws.services.glue.model.TagResourceRequest;
-import com.amazonaws.services.glue.model.TagResourceResult;
-import com.amazonaws.services.glue.model.UntagResourceRequest;
-import com.amazonaws.services.glue.model.UntagResourceResult;
-import com.amazonaws.services.glue.model.UpdateBlueprintRequest;
-import com.amazonaws.services.glue.model.UpdateBlueprintResult;
-import com.amazonaws.services.glue.model.UpdateClassifierRequest;
-import com.amazonaws.services.glue.model.UpdateClassifierResult;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForPartitionResult;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForTableResult;
-import com.amazonaws.services.glue.model.UpdateConnectionRequest;
-import com.amazonaws.services.glue.model.UpdateConnectionResult;
-import com.amazonaws.services.glue.model.UpdateCrawlerRequest;
-import com.amazonaws.services.glue.model.UpdateCrawlerResult;
-import com.amazonaws.services.glue.model.UpdateCrawlerScheduleRequest;
-import com.amazonaws.services.glue.model.UpdateCrawlerScheduleResult;
-import com.amazonaws.services.glue.model.UpdateDatabaseRequest;
-import com.amazonaws.services.glue.model.UpdateDatabaseResult;
-import com.amazonaws.services.glue.model.UpdateDevEndpointRequest;
-import com.amazonaws.services.glue.model.UpdateDevEndpointResult;
-import com.amazonaws.services.glue.model.UpdateJobRequest;
-import com.amazonaws.services.glue.model.UpdateJobResult;
-import com.amazonaws.services.glue.model.UpdateMLTransformRequest;
-import com.amazonaws.services.glue.model.UpdateMLTransformResult;
-import com.amazonaws.services.glue.model.UpdatePartitionRequest;
-import com.amazonaws.services.glue.model.UpdatePartitionResult;
-import com.amazonaws.services.glue.model.UpdateRegistryRequest;
-import com.amazonaws.services.glue.model.UpdateRegistryResult;
-import com.amazonaws.services.glue.model.UpdateSchemaRequest;
-import com.amazonaws.services.glue.model.UpdateSchemaResult;
-import com.amazonaws.services.glue.model.UpdateTableRequest;
-import com.amazonaws.services.glue.model.UpdateTableResult;
-import com.amazonaws.services.glue.model.UpdateTriggerRequest;
-import com.amazonaws.services.glue.model.UpdateTriggerResult;
-import com.amazonaws.services.glue.model.UpdateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.UpdateUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.UpdateWorkflowRequest;
-import com.amazonaws.services.glue.model.UpdateWorkflowResult;
-
-/**
- * Base decorator for the AWSGlue interface. It doesn't decorate any functionality itself; it
- * simply proxies all methods to decoratedAwsGlue. It should be used as a parent for specific
- * decorators where only the necessary methods are overwritten and decorated.
- * All @Override methods are generated by IntelliJ IDEA.
- */
-public class AWSGlueDecoratorBase implements AWSGlue {
-
-
-
-    private AWSGlue decoratedAwsGlue;
-
-    public AWSGlueDecoratorBase(AWSGlue awsGlueToBeDecorated) {
-        this.decoratedAwsGlue = awsGlueToBeDecorated;
-    }
-
-    @Override
-    public BatchCreatePartitionResult batchCreatePartition(BatchCreatePartitionRequest batchCreatePartitionRequest) {
-        return decoratedAwsGlue.batchCreatePartition(batchCreatePartitionRequest);
-    }
-
-    @Override
-    public BatchDeleteConnectionResult batchDeleteConnection(BatchDeleteConnectionRequest batchDeleteConnectionRequest) {
-        return decoratedAwsGlue.batchDeleteConnection(batchDeleteConnectionRequest);
-    }
-
-    @Override
-    public BatchDeletePartitionResult batchDeletePartition(BatchDeletePartitionRequest batchDeletePartitionRequest) {
-        return decoratedAwsGlue.batchDeletePartition(batchDeletePartitionRequest);
-    }
-
-    @Override
-    public BatchDeleteTableResult batchDeleteTable(BatchDeleteTableRequest batchDeleteTableRequest) {
-        return decoratedAwsGlue.batchDeleteTable(batchDeleteTableRequest);
-    }
-
-    @Override
-    public BatchDeleteTableVersionResult batchDeleteTableVersion(BatchDeleteTableVersionRequest batchDeleteTableVersionRequest) {
-        return decoratedAwsGlue.batchDeleteTableVersion(batchDeleteTableVersionRequest);
-    }
-
-    @Override
-    public BatchGetCrawlersResult batchGetCrawlers(BatchGetCrawlersRequest batchGetCrawlersRequest) {
-        return decoratedAwsGlue.batchGetCrawlers(batchGetCrawlersRequest);
-    }
-
-    @Override
-    public BatchGetCustomEntityTypesResult batchGetCustomEntityTypes(BatchGetCustomEntityTypesRequest batchGetCustomEntityTypesRequest) {
-        return decoratedAwsGlue.batchGetCustomEntityTypes(batchGetCustomEntityTypesRequest);
-    }
-
-    @Override
-    public BatchGetDevEndpointsResult batchGetDevEndpoints(BatchGetDevEndpointsRequest batchGetDevEndpointsRequest) {
-        return decoratedAwsGlue.batchGetDevEndpoints(batchGetDevEndpointsRequest);
-    }
-
-    @Override
-    public BatchGetJobsResult batchGetJobs(BatchGetJobsRequest batchGetJobsRequest) {
-        return decoratedAwsGlue.batchGetJobs(batchGetJobsRequest);
-    }
-
-    @Override
-    public BatchGetPartitionResult batchGetPartition(BatchGetPartitionRequest batchGetPartitionRequest) {
-        return decoratedAwsGlue.batchGetPartition(batchGetPartitionRequest);
-    }
-
-    @Override
-    public BatchGetTriggersResult batchGetTriggers(BatchGetTriggersRequest batchGetTriggersRequest) {
-        return decoratedAwsGlue.batchGetTriggers(batchGetTriggersRequest);
-    }
-
-    @Override
-    public BatchGetWorkflowsResult batchGetWorkflows(BatchGetWorkflowsRequest batchGetWorkflowsRequest) {
-        return decoratedAwsGlue.batchGetWorkflows(batchGetWorkflowsRequest);
-    }
-
-    @Override
-    public BatchStopJobRunResult batchStopJobRun(BatchStopJobRunRequest batchStopJobRunRequest) {
-        return decoratedAwsGlue.batchStopJobRun(batchStopJobRunRequest);
-    }
-
-    @Override
-    public BatchUpdatePartitionResult batchUpdatePartition(BatchUpdatePartitionRequest batchUpdatePartitionRequest) {
-        return decoratedAwsGlue.batchUpdatePartition(batchUpdatePartitionRequest);
-    }
-
-    @Override
-    public CancelMLTaskRunResult cancelMLTaskRun(CancelMLTaskRunRequest cancelMLTaskRunRequest) {
-        return decoratedAwsGlue.cancelMLTaskRun(cancelMLTaskRunRequest);
-    }
-
-    @Override
-    public CancelStatementResult cancelStatement(CancelStatementRequest cancelStatementRequest) {
-        return decoratedAwsGlue.cancelStatement(cancelStatementRequest);
-    }
-
-    @Override
-    public CheckSchemaVersionValidityResult checkSchemaVersionValidity(CheckSchemaVersionValidityRequest checkSchemaVersionValidityRequest) {
-        return decoratedAwsGlue.checkSchemaVersionValidity(checkSchemaVersionValidityRequest);
-    }
-
-    @Override
-    public CreateBlueprintResult createBlueprint(CreateBlueprintRequest createBlueprintRequest) {
-        return decoratedAwsGlue.createBlueprint(createBlueprintRequest);
-    }
-
-    @Override
-    public CreateClassifierResult createClassifier(CreateClassifierRequest createClassifierRequest) {
-        return decoratedAwsGlue.createClassifier(createClassifierRequest);
-    }
-
-    @Override
-    public CreateConnectionResult createConnection(CreateConnectionRequest createConnectionRequest) {
-        return decoratedAwsGlue.createConnection(createConnectionRequest);
-    }
-
-    @Override
-    public CreateCrawlerResult createCrawler(CreateCrawlerRequest createCrawlerRequest) {
-        return decoratedAwsGlue.createCrawler(createCrawlerRequest);
-    }
-
-    @Override
-    public CreateCustomEntityTypeResult createCustomEntityType(CreateCustomEntityTypeRequest createCustomEntityTypeRequest) {
-        return decoratedAwsGlue.createCustomEntityType(createCustomEntityTypeRequest);
-    }
-
-    @Override
-    public CreateDatabaseResult createDatabase(CreateDatabaseRequest createDatabaseRequest) {
-        return decoratedAwsGlue.createDatabase(createDatabaseRequest);
-    }
-
-    @Override
-    public CreateDevEndpointResult createDevEndpoint(CreateDevEndpointRequest createDevEndpointRequest) {
-        return decoratedAwsGlue.createDevEndpoint(createDevEndpointRequest);
-    }
-
-    @Override
-    public CreateJobResult createJob(CreateJobRequest createJobRequest) {
-        return decoratedAwsGlue.createJob(createJobRequest);
-    }
-
-    @Override
-    public CreateMLTransformResult createMLTransform(CreateMLTransformRequest createMLTransformRequest) {
-        return decoratedAwsGlue.createMLTransform(createMLTransformRequest);
-    }
-
-    @Override
-    public CreatePartitionResult createPartition(CreatePartitionRequest createPartitionRequest) {
-        return decoratedAwsGlue.createPartition(createPartitionRequest);
-    }
-
-    @Override
-    public CreatePartitionIndexResult createPartitionIndex(CreatePartitionIndexRequest createPartitionIndexRequest) {
-        return decoratedAwsGlue.createPartitionIndex(createPartitionIndexRequest);
-    }
-
-    @Override
-    public CreateRegistryResult createRegistry(CreateRegistryRequest createRegistryRequest) {
-        return decoratedAwsGlue.createRegistry(createRegistryRequest);
-    }
-
-    @Override
-    public CreateSchemaResult createSchema(CreateSchemaRequest createSchemaRequest) {
-        return decoratedAwsGlue.createSchema(createSchemaRequest);
-    }
-
-    @Override
-    public CreateScriptResult createScript(CreateScriptRequest createScriptRequest) {
-        return decoratedAwsGlue.createScript(createScriptRequest);
-    }
-
-    @Override
-    public CreateSecurityConfigurationResult createSecurityConfiguration(CreateSecurityConfigurationRequest createSecurityConfigurationRequest) {
-        return decoratedAwsGlue.createSecurityConfiguration(createSecurityConfigurationRequest);
-    }
-
-    @Override
-    public CreateSessionResult createSession(CreateSessionRequest createSessionRequest) {
-        return decoratedAwsGlue.createSession(createSessionRequest);
-    }
-
-    @Override
-    public CreateTableResult createTable(CreateTableRequest createTableRequest) {
-        return decoratedAwsGlue.createTable(createTableRequest);
-    }
-
-    @Override
-    public CreateTriggerResult createTrigger(CreateTriggerRequest createTriggerRequest) {
-        return decoratedAwsGlue.createTrigger(createTriggerRequest);
-    }
-
-    @Override
-    public CreateUserDefinedFunctionResult createUserDefinedFunction(CreateUserDefinedFunctionRequest createUserDefinedFunctionRequest) {
-        return decoratedAwsGlue.createUserDefinedFunction(createUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public CreateWorkflowResult createWorkflow(CreateWorkflowRequest createWorkflowRequest) {
-        return decoratedAwsGlue.createWorkflow(createWorkflowRequest);
-    }
-
-    @Override
-    public DeleteBlueprintResult deleteBlueprint(DeleteBlueprintRequest deleteBlueprintRequest) {
-        return decoratedAwsGlue.deleteBlueprint(deleteBlueprintRequest);
-    }
-
-    @Override
-    public DeleteClassifierResult deleteClassifier(DeleteClassifierRequest deleteClassifierRequest) {
-        return decoratedAwsGlue.deleteClassifier(deleteClassifierRequest);
-    }
-
-    @Override
-    public DeleteConnectionResult deleteConnection(DeleteConnectionRequest deleteConnectionRequest) {
-        return decoratedAwsGlue.deleteConnection(deleteConnectionRequest);
-    }
-
-    @Override
-    public DeleteCrawlerResult deleteCrawler(DeleteCrawlerRequest deleteCrawlerRequest) {
-        return decoratedAwsGlue.deleteCrawler(deleteCrawlerRequest);
-    }
-
-    @Override
-    public DeleteCustomEntityTypeResult deleteCustomEntityType(DeleteCustomEntityTypeRequest deleteCustomEntityTypeRequest) {
-        return decoratedAwsGlue.deleteCustomEntityType(deleteCustomEntityTypeRequest);
-    }
-
-    @Override
-    public DeleteDatabaseResult deleteDatabase(DeleteDatabaseRequest deleteDatabaseRequest) {
-        return decoratedAwsGlue.deleteDatabase(deleteDatabaseRequest);
-    }
-
-    @Override
-    public DeleteDevEndpointResult deleteDevEndpoint(DeleteDevEndpointRequest deleteDevEndpointRequest) {
-        return decoratedAwsGlue.deleteDevEndpoint(deleteDevEndpointRequest);
-    }
-
-    @Override
-    public DeleteJobResult deleteJob(DeleteJobRequest deleteJobRequest) {
-        return decoratedAwsGlue.deleteJob(deleteJobRequest);
-    }
-
-    @Override
-    public DeleteMLTransformResult deleteMLTransform(DeleteMLTransformRequest deleteMLTransformRequest) {
-        return decoratedAwsGlue.deleteMLTransform(deleteMLTransformRequest);
-    }
-
-    @Override
-    public DeletePartitionResult deletePartition(DeletePartitionRequest deletePartitionRequest) {
-        return decoratedAwsGlue.deletePartition(deletePartitionRequest);
-    }
-
-    @Override
-    public DeletePartitionIndexResult deletePartitionIndex(DeletePartitionIndexRequest deletePartitionIndexRequest) {
-        return null;
-    }
-
-    @Override
-    public DeleteRegistryResult deleteRegistry(DeleteRegistryRequest deleteRegistryRequest) {
-        return null;
-    }
-
-    @Override
-    public DeleteResourcePolicyResult deleteResourcePolicy(DeleteResourcePolicyRequest deleteResourcePolicyRequest) {
-        return decoratedAwsGlue.deleteResourcePolicy(deleteResourcePolicyRequest);
-    }
-
-    @Override
-    public DeleteSchemaResult deleteSchema(DeleteSchemaRequest deleteSchemaRequest) {
-        return null;
-    }
-
-    @Override
-    public DeleteSchemaVersionsResult deleteSchemaVersions(DeleteSchemaVersionsRequest deleteSchemaVersionsRequest) {
-        return null;
-    }
-
-    @Override
-    public DeleteSecurityConfigurationResult deleteSecurityConfiguration(DeleteSecurityConfigurationRequest deleteSecurityConfigurationRequest) {
-        return decoratedAwsGlue.deleteSecurityConfiguration(deleteSecurityConfigurationRequest);
-    }
-
-    @Override
-    public DeleteSessionResult deleteSession(DeleteSessionRequest deleteSessionRequest) {
-        return decoratedAwsGlue.deleteSession(deleteSessionRequest);
-    }
-
-    @Override
-    public DeleteTableResult deleteTable(DeleteTableRequest deleteTableRequest) {
-        return decoratedAwsGlue.deleteTable(deleteTableRequest);
-    }
-
-    @Override
-    public DeleteTableVersionResult deleteTableVersion(DeleteTableVersionRequest deleteTableVersionRequest) {
-        return decoratedAwsGlue.deleteTableVersion(deleteTableVersionRequest);
-    }
-
-    @Override
-    public DeleteTriggerResult deleteTrigger(DeleteTriggerRequest deleteTriggerRequest) {
-        return decoratedAwsGlue.deleteTrigger(deleteTriggerRequest);
-    }
-
-    @Override
-    public DeleteUserDefinedFunctionResult deleteUserDefinedFunction(DeleteUserDefinedFunctionRequest deleteUserDefinedFunctionRequest) {
-        return decoratedAwsGlue.deleteUserDefinedFunction(deleteUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public DeleteWorkflowResult deleteWorkflow(DeleteWorkflowRequest deleteWorkflowRequest) {
-        return decoratedAwsGlue.deleteWorkflow(deleteWorkflowRequest);
-    }
-
-    @Override
-    public GetBlueprintResult getBlueprint(GetBlueprintRequest getBlueprintRequest) {
-        return decoratedAwsGlue.getBlueprint(getBlueprintRequest);
-    }
-
-    @Override
-    public GetBlueprintRunResult getBlueprintRun(GetBlueprintRunRequest getBlueprintRunRequest) {
-        return decoratedAwsGlue.getBlueprintRun(getBlueprintRunRequest);
-    }
-
-    @Override
-    public GetBlueprintRunsResult getBlueprintRuns(GetBlueprintRunsRequest getBlueprintRunsRequest) {
-        return decoratedAwsGlue.getBlueprintRuns(getBlueprintRunsRequest);
-    }
-
-    @Override
-    public GetCatalogImportStatusResult getCatalogImportStatus(GetCatalogImportStatusRequest getCatalogImportStatusRequest) {
-        return decoratedAwsGlue.getCatalogImportStatus(getCatalogImportStatusRequest);
-    }
-
-    @Override
-    public GetClassifierResult getClassifier(GetClassifierRequest getClassifierRequest) {
-        return decoratedAwsGlue.getClassifier(getClassifierRequest);
-    }
-
-    @Override
-    public GetClassifiersResult getClassifiers(GetClassifiersRequest getClassifiersRequest) {
-        return decoratedAwsGlue.getClassifiers(getClassifiersRequest);
-    }
-
-    @Override
-    public GetConnectionResult getConnection(GetConnectionRequest getConnectionRequest) {
-        return decoratedAwsGlue.getConnection(getConnectionRequest);
-    }
-
-    @Override
-    public GetConnectionsResult getConnections(GetConnectionsRequest getConnectionsRequest) {
-        return decoratedAwsGlue.getConnections(getConnectionsRequest);
-    }
-
-    @Override
-    public GetCrawlerResult getCrawler(GetCrawlerRequest getCrawlerRequest) {
-        return decoratedAwsGlue.getCrawler(getCrawlerRequest);
-    }
-
-    @Override
-    public GetCrawlerMetricsResult getCrawlerMetrics(GetCrawlerMetricsRequest getCrawlerMetricsRequest) {
-        return decoratedAwsGlue.getCrawlerMetrics(getCrawlerMetricsRequest);
-    }
-
-    @Override
-    public GetCrawlersResult getCrawlers(GetCrawlersRequest getCrawlersRequest) {
-        return decoratedAwsGlue.getCrawlers(getCrawlersRequest);
-    }
-
-    @Override
-    public GetCustomEntityTypeResult getCustomEntityType(GetCustomEntityTypeRequest getCustomEntityTypeRequest) {
-        return decoratedAwsGlue.getCustomEntityType(getCustomEntityTypeRequest);
-    }
-
-    @Override
-    public GetDataCatalogEncryptionSettingsResult getDataCatalogEncryptionSettings(GetDataCatalogEncryptionSettingsRequest getDataCatalogEncryptionSettingsRequest) {
-        return decoratedAwsGlue.getDataCatalogEncryptionSettings(getDataCatalogEncryptionSettingsRequest);
-    }
-
-    @Override
-    public GetDatabaseResult getDatabase(GetDatabaseRequest getDatabaseRequest) {
-        return decoratedAwsGlue.getDatabase(getDatabaseRequest);
-    }
-
-    @Override
-    public GetDatabasesResult getDatabases(GetDatabasesRequest getDatabasesRequest) {
-        return decoratedAwsGlue.getDatabases(getDatabasesRequest);
-    }
-
-    @Override
-    public GetDataflowGraphResult getDataflowGraph(GetDataflowGraphRequest getDataflowGraphRequest) {
-        return decoratedAwsGlue.getDataflowGraph(getDataflowGraphRequest);
-    }
-
-    @Override
-    public GetDevEndpointResult getDevEndpoint(GetDevEndpointRequest getDevEndpointRequest) {
-        return decoratedAwsGlue.getDevEndpoint(getDevEndpointRequest);
-    }
-
-    @Override
-    public GetDevEndpointsResult getDevEndpoints(GetDevEndpointsRequest getDevEndpointsRequest) {
-        return decoratedAwsGlue.getDevEndpoints(getDevEndpointsRequest);
-    }
-
-    @Override
-    public GetJobResult getJob(GetJobRequest getJobRequest) {
-        return decoratedAwsGlue.getJob(getJobRequest);
-    }
-
-    @Override
-    public GetJobBookmarkResult getJobBookmark(GetJobBookmarkRequest getJobBookmarkRequest) {
-        return decoratedAwsGlue.getJobBookmark(getJobBookmarkRequest);
-    }
-
-    @Override
-    public GetJobRunResult getJobRun(GetJobRunRequest getJobRunRequest) {
-        return decoratedAwsGlue.getJobRun(getJobRunRequest);
-    }
-
-    @Override
-    public GetJobRunsResult getJobRuns(GetJobRunsRequest getJobRunsRequest) {
-        return decoratedAwsGlue.getJobRuns(getJobRunsRequest);
-    }
-
-    @Override
-    public GetJobsResult getJobs(GetJobsRequest getJobsRequest) {
-        return decoratedAwsGlue.getJobs(getJobsRequest);
-    }
-
-    @Override
-    public GetMLTaskRunResult getMLTaskRun(GetMLTaskRunRequest getMLTaskRunRequest) {
-        return decoratedAwsGlue.getMLTaskRun(getMLTaskRunRequest);
-    }
-
-    @Override
-    public GetMLTaskRunsResult getMLTaskRuns(GetMLTaskRunsRequest getMLTaskRunsRequest) {
-        return decoratedAwsGlue.getMLTaskRuns(getMLTaskRunsRequest);
-    }
-
-    @Override
-    public GetMLTransformResult getMLTransform(GetMLTransformRequest getMLTransformRequest) {
-        return decoratedAwsGlue.getMLTransform(getMLTransformRequest);
-    }
-
-    @Override
-    public GetMLTransformsResult getMLTransforms(GetMLTransformsRequest getMLTransformsRequest) {
-        return decoratedAwsGlue.getMLTransforms(getMLTransformsRequest);
-    }
-
-    @Override
-    public GetMappingResult getMapping(GetMappingRequest getMappingRequest) {
-        return decoratedAwsGlue.getMapping(getMappingRequest);
-    }
-
-    @Override
-    public GetPartitionIndexesResult getPartitionIndexes(GetPartitionIndexesRequest getPartitionIndexesRequest) {
-        return decoratedAwsGlue.getPartitionIndexes(getPartitionIndexesRequest);
-    }
-
-    @Override
-    public GetPartitionResult getPartition(GetPartitionRequest getPartitionRequest) {
-        return decoratedAwsGlue.getPartition(getPartitionRequest);
-    }
-
-    @Override
-    public GetPartitionsResult getPartitions(GetPartitionsRequest getPartitionsRequest) {
-        return decoratedAwsGlue.getPartitions(getPartitionsRequest);
-    }
-
-    @Override
-    public GetPlanResult getPlan(GetPlanRequest getPlanRequest) {
-        return decoratedAwsGlue.getPlan(getPlanRequest);
-    }
-
-    @Override
-    public GetRegistryResult getRegistry(GetRegistryRequest getRegistryRequest) {
-        return null;
-    }
-
-    @Override
-    public GetResourcePolicyResult getResourcePolicy(GetResourcePolicyRequest getResourcePolicyRequest) {
-        return decoratedAwsGlue.getResourcePolicy(getResourcePolicyRequest);
-    }
-
-    @Override
-    public GetSchemaResult getSchema(GetSchemaRequest getSchemaRequest) {
-        return null;
-    }
-
-    @Override
-    public GetSchemaByDefinitionResult getSchemaByDefinition(GetSchemaByDefinitionRequest getSchemaByDefinitionRequest) {
-        return null;
-    }
-
-    @Override
-    public GetSchemaVersionResult getSchemaVersion(GetSchemaVersionRequest getSchemaVersionRequest) {
-        return null;
-    }
-
-    @Override
-    public GetSchemaVersionsDiffResult getSchemaVersionsDiff(GetSchemaVersionsDiffRequest getSchemaVersionsDiffRequest) {
-        return null;
-    }
-
-    @Override
-    public GetSecurityConfigurationResult getSecurityConfiguration(GetSecurityConfigurationRequest getSecurityConfigurationRequest) {
-        return decoratedAwsGlue.getSecurityConfiguration(getSecurityConfigurationRequest);
-    }
-
-    @Override
-    public GetSecurityConfigurationsResult getSecurityConfigurations(GetSecurityConfigurationsRequest getSecurityConfigurationsRequest) {
-        return decoratedAwsGlue.getSecurityConfigurations(getSecurityConfigurationsRequest);
-    }
-
-    @Override
-    public GetSessionResult getSession(GetSessionRequest getSessionRequest) {
-        return decoratedAwsGlue.getSession(getSessionRequest);
-    }
-
-    @Override
-    public GetStatementResult getStatement(GetStatementRequest getStatementRequest) {
-        return decoratedAwsGlue.getStatement(getStatementRequest);
-    }
-
-    @Override
-    public GetTableResult getTable(GetTableRequest getTableRequest) {
-        return decoratedAwsGlue.getTable(getTableRequest);
-    }
-
-    @Override
-    public GetTableVersionResult getTableVersion(GetTableVersionRequest getTableVersionRequest) {
-        return decoratedAwsGlue.getTableVersion(getTableVersionRequest);
-    }
-
-    @Override
-    public GetTableVersionsResult getTableVersions(GetTableVersionsRequest getTableVersionsRequest) {
-        return decoratedAwsGlue.getTableVersions(getTableVersionsRequest);
-    }
-
-    @Override
-    public GetTablesResult getTables(GetTablesRequest getTablesRequest) {
-        return decoratedAwsGlue.getTables(getTablesRequest);
-    }
-
-    @Override
-    public GetTagsResult getTags(GetTagsRequest getTagsRequest) {
-        return decoratedAwsGlue.getTags(getTagsRequest);
-    }
-
-    @Override
-    public GetTriggerResult getTrigger(GetTriggerRequest getTriggerRequest) {
-        return decoratedAwsGlue.getTrigger(getTriggerRequest);
-    }
-
-    @Override
-    public GetTriggersResult getTriggers(GetTriggersRequest getTriggersRequest) {
-        return decoratedAwsGlue.getTriggers(getTriggersRequest);
-    }
-
-    @Override
-    public GetUnfilteredPartitionMetadataResult getUnfilteredPartitionMetadata(GetUnfilteredPartitionMetadataRequest getUnfilteredPartitionMetadataRequest) {
-        return decoratedAwsGlue.getUnfilteredPartitionMetadata(getUnfilteredPartitionMetadataRequest);
-    }
-
-    @Override
-    public GetUnfilteredPartitionsMetadataResult getUnfilteredPartitionsMetadata(GetUnfilteredPartitionsMetadataRequest getUnfilteredPartitionsMetadataRequest) {
-        return decoratedAwsGlue.getUnfilteredPartitionsMetadata(getUnfilteredPartitionsMetadataRequest);
-    }
-
-    @Override
-    public GetUnfilteredTableMetadataResult getUnfilteredTableMetadata(GetUnfilteredTableMetadataRequest getUnfilteredTableMetadataRequest) {
-        return decoratedAwsGlue.getUnfilteredTableMetadata(getUnfilteredTableMetadataRequest);
-    }
-
-    @Override
-    public GetUserDefinedFunctionResult getUserDefinedFunction(GetUserDefinedFunctionRequest getUserDefinedFunctionRequest) {
-        return decoratedAwsGlue.getUserDefinedFunction(getUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public GetUserDefinedFunctionsResult getUserDefinedFunctions(GetUserDefinedFunctionsRequest getUserDefinedFunctionsRequest) {
-        return decoratedAwsGlue.getUserDefinedFunctions(getUserDefinedFunctionsRequest);
-    }
-
-    @Override
-    public GetWorkflowResult getWorkflow(GetWorkflowRequest getWorkflowRequest) {
-        return decoratedAwsGlue.getWorkflow(getWorkflowRequest);
-    }
-
-    @Override
-    public GetWorkflowRunResult getWorkflowRun(GetWorkflowRunRequest getWorkflowRunRequest) {
-        return decoratedAwsGlue.getWorkflowRun(getWorkflowRunRequest);
-    }
-
-    @Override
-    public GetWorkflowRunPropertiesResult getWorkflowRunProperties(GetWorkflowRunPropertiesRequest getWorkflowRunPropertiesRequest) {
-        return decoratedAwsGlue.getWorkflowRunProperties(getWorkflowRunPropertiesRequest);
-    }
-
-    @Override
-    public GetWorkflowRunsResult getWorkflowRuns(GetWorkflowRunsRequest getWorkflowRunsRequest) {
-        return decoratedAwsGlue.getWorkflowRuns(getWorkflowRunsRequest);
-    }
-
-    @Override
-    public ImportCatalogToGlueResult importCatalogToGlue(ImportCatalogToGlueRequest importCatalogToGlueRequest) {
-        return decoratedAwsGlue.importCatalogToGlue(importCatalogToGlueRequest);
-    }
-
-    @Override
-    public ListBlueprintsResult listBlueprints(ListBlueprintsRequest listBlueprintsRequest) {
-        return decoratedAwsGlue.listBlueprints(listBlueprintsRequest);
-    }
-
-    @Override
-    public ListCrawlersResult listCrawlers(ListCrawlersRequest listCrawlersRequest) {
-        return decoratedAwsGlue.listCrawlers(listCrawlersRequest);
-    }
-
-    @Override
-    public ListCrawlsResult listCrawls(ListCrawlsRequest listCrawlsRequest) {
-        return decoratedAwsGlue.listCrawls(listCrawlsRequest);
-    }
-
-    @Override
-    public ListCustomEntityTypesResult listCustomEntityTypes(ListCustomEntityTypesRequest listCustomEntityTypesRequest) {
-        return decoratedAwsGlue.listCustomEntityTypes(listCustomEntityTypesRequest);
-    }
-
-    @Override
-    public ListDevEndpointsResult listDevEndpoints(ListDevEndpointsRequest listDevEndpointsRequest) {
-        return decoratedAwsGlue.listDevEndpoints(listDevEndpointsRequest);
-    }
-
-    @Override
-    public ListJobsResult listJobs(ListJobsRequest listJobsRequest) {
-        return decoratedAwsGlue.listJobs(listJobsRequest);
-    }
-
-    @Override
-    public ListMLTransformsResult listMLTransforms(ListMLTransformsRequest listMLTransformsRequest) {
-        return decoratedAwsGlue.listMLTransforms(listMLTransformsRequest);
-    }
-
-    @Override
-    public ListRegistriesResult listRegistries(ListRegistriesRequest listRegistriesRequest) {
-        return null;
-    }
-
-    @Override
-    public ListSchemaVersionsResult listSchemaVersions(ListSchemaVersionsRequest listSchemaVersionsRequest) {
-        return null;
-    }
-
-    @Override
-    public ListSchemasResult listSchemas(ListSchemasRequest listSchemasRequest) {
-        return null;
-    }
-
-    @Override
-    public ListSessionsResult listSessions(ListSessionsRequest listSessionsRequest) {
-        return decoratedAwsGlue.listSessions(listSessionsRequest);
-    }
-
-    @Override
-    public ListStatementsResult listStatements(ListStatementsRequest listStatementsRequest) {
-        return decoratedAwsGlue.listStatements(listStatementsRequest);
-    }
-
-    @Override
-    public ListTriggersResult listTriggers(ListTriggersRequest listTriggersRequest) {
-        return decoratedAwsGlue.listTriggers(listTriggersRequest);
-    }
-
-    @Override
-    public ListWorkflowsResult listWorkflows(ListWorkflowsRequest listWorkflowsRequest) {
-        return decoratedAwsGlue.listWorkflows(listWorkflowsRequest);
-    }
-
-    @Override
-    public PutDataCatalogEncryptionSettingsResult putDataCatalogEncryptionSettings(PutDataCatalogEncryptionSettingsRequest putDataCatalogEncryptionSettingsRequest) {
-        return decoratedAwsGlue.putDataCatalogEncryptionSettings(putDataCatalogEncryptionSettingsRequest);
-    }
-
-    @Override
-    public PutResourcePolicyResult putResourcePolicy(PutResourcePolicyRequest putResourcePolicyRequest) {
-        return decoratedAwsGlue.putResourcePolicy(putResourcePolicyRequest);
-    }
-
-    @Override
-    public PutSchemaVersionMetadataResult putSchemaVersionMetadata(PutSchemaVersionMetadataRequest putSchemaVersionMetadataRequest) {
-        return null;
-    }
-
-    @Override
-    public PutWorkflowRunPropertiesResult putWorkflowRunProperties(PutWorkflowRunPropertiesRequest putWorkflowRunPropertiesRequest) {
-        return decoratedAwsGlue.putWorkflowRunProperties(putWorkflowRunPropertiesRequest);
-    }
-
-    @Override
-    public QuerySchemaVersionMetadataResult querySchemaVersionMetadata(QuerySchemaVersionMetadataRequest querySchemaVersionMetadataRequest) {
-        return null;
-    }
-
-    @Override
-    public RegisterSchemaVersionResult registerSchemaVersion(RegisterSchemaVersionRequest registerSchemaVersionRequest) {
-        return null;
-    }
-
-    @Override
-    public RemoveSchemaVersionMetadataResult removeSchemaVersionMetadata(RemoveSchemaVersionMetadataRequest removeSchemaVersionMetadataRequest) {
-        return null;
-    }
-
-    @Override
-    public ResetJobBookmarkResult resetJobBookmark(ResetJobBookmarkRequest resetJobBookmarkRequest) {
-        return decoratedAwsGlue.resetJobBookmark(resetJobBookmarkRequest);
-    }
-
-    @Override
-    public SearchTablesResult searchTables(SearchTablesRequest searchTablesRequest) {
-        return decoratedAwsGlue.searchTables(searchTablesRequest);
-    }
-
-    @Override
-    public StartBlueprintRunResult startBlueprintRun(StartBlueprintRunRequest startBlueprintRunRequest) {
-        return decoratedAwsGlue.startBlueprintRun(startBlueprintRunRequest);
-    }
-
-    @Override
-    public StartCrawlerResult startCrawler(StartCrawlerRequest startCrawlerRequest) {
-        return decoratedAwsGlue.startCrawler(startCrawlerRequest);
-    }
-
-    @Override
-    public StartCrawlerScheduleResult startCrawlerSchedule(StartCrawlerScheduleRequest startCrawlerScheduleRequest) {
-        return decoratedAwsGlue.startCrawlerSchedule(startCrawlerScheduleRequest);
-    }
-
-    @Override
-    public StartExportLabelsTaskRunResult startExportLabelsTaskRun(StartExportLabelsTaskRunRequest startExportLabelsTaskRunRequest) {
-        return decoratedAwsGlue.startExportLabelsTaskRun(startExportLabelsTaskRunRequest);
-    }
-
-    @Override
-    public StartImportLabelsTaskRunResult startImportLabelsTaskRun(StartImportLabelsTaskRunRequest startImportLabelsTaskRunRequest) {
-        return decoratedAwsGlue.startImportLabelsTaskRun(startImportLabelsTaskRunRequest);
-    }
-
-    @Override
-    public StartJobRunResult startJobRun(StartJobRunRequest startJobRunRequest) {
-        return decoratedAwsGlue.startJobRun(startJobRunRequest);
-    }
-
-    @Override
-    public StartMLEvaluationTaskRunResult startMLEvaluationTaskRun(StartMLEvaluationTaskRunRequest startMLEvaluationTaskRunRequest) {
-        return decoratedAwsGlue.startMLEvaluationTaskRun(startMLEvaluationTaskRunRequest);
-    }
-
-    @Override
-    public StartMLLabelingSetGenerationTaskRunResult startMLLabelingSetGenerationTaskRun(StartMLLabelingSetGenerationTaskRunRequest startMLLabelingSetGenerationTaskRunRequest) {
-        return decoratedAwsGlue.startMLLabelingSetGenerationTaskRun(startMLLabelingSetGenerationTaskRunRequest);
-    }
-
-    @Override
-    public StartTriggerResult startTrigger(StartTriggerRequest startTriggerRequest) {
-        return decoratedAwsGlue.startTrigger(startTriggerRequest);
-    }
-
-    @Override
-    public StartWorkflowRunResult startWorkflowRun(StartWorkflowRunRequest startWorkflowRunRequest) {
-        return decoratedAwsGlue.startWorkflowRun(startWorkflowRunRequest);
-    }
-
-    @Override
-    public StopCrawlerResult stopCrawler(StopCrawlerRequest stopCrawlerRequest) {
-        return decoratedAwsGlue.stopCrawler(stopCrawlerRequest);
-    }
-
-    @Override
-    public StopCrawlerScheduleResult stopCrawlerSchedule(StopCrawlerScheduleRequest stopCrawlerScheduleRequest) {
-        return decoratedAwsGlue.stopCrawlerSchedule(stopCrawlerScheduleRequest);
-    }
-
-    @Override
-    public StopSessionResult stopSession(StopSessionRequest stopSessionRequest) {
-        return decoratedAwsGlue.stopSession(stopSessionRequest);
-    }
-
-    @Override
-    public StopTriggerResult stopTrigger(StopTriggerRequest stopTriggerRequest) {
-        return decoratedAwsGlue.stopTrigger(stopTriggerRequest);
-    }
-
-    @Override
-    public StopWorkflowRunResult stopWorkflowRun(StopWorkflowRunRequest stopWorkflowRunRequest) {
-        return decoratedAwsGlue.stopWorkflowRun(stopWorkflowRunRequest);
-    }
-
-    @Override
-    public TagResourceResult tagResource(TagResourceRequest tagResourceRequest) {
-        return decoratedAwsGlue.tagResource(tagResourceRequest);
-    }
-
-    @Override
-    public UntagResourceResult untagResource(UntagResourceRequest untagResourceRequest) {
-        return decoratedAwsGlue.untagResource(untagResourceRequest);
-    }
-
-    @Override
-    public UpdateBlueprintResult updateBlueprint(UpdateBlueprintRequest updateBlueprintRequest) {
-        return decoratedAwsGlue.updateBlueprint(updateBlueprintRequest);
-    }
-
-    @Override
-    public UpdateClassifierResult updateClassifier(UpdateClassifierRequest updateClassifierRequest) {
-        return decoratedAwsGlue.updateClassifier(updateClassifierRequest);
-    }
-
-    @Override
-    public UpdateConnectionResult updateConnection(UpdateConnectionRequest updateConnectionRequest) {
-        return decoratedAwsGlue.updateConnection(updateConnectionRequest);
-    }
-
-    @Override
-    public UpdateCrawlerResult updateCrawler(UpdateCrawlerRequest updateCrawlerRequest) {
-        return decoratedAwsGlue.updateCrawler(updateCrawlerRequest);
-    }
-
-    @Override
-    public UpdateCrawlerScheduleResult updateCrawlerSchedule(UpdateCrawlerScheduleRequest updateCrawlerScheduleRequest) {
-        return decoratedAwsGlue.updateCrawlerSchedule(updateCrawlerScheduleRequest);
-    }
-
-    @Override
-    public UpdateDatabaseResult updateDatabase(UpdateDatabaseRequest updateDatabaseRequest) {
-        return decoratedAwsGlue.updateDatabase(updateDatabaseRequest);
-    }
-
-    @Override
-    public UpdateDevEndpointResult updateDevEndpoint(UpdateDevEndpointRequest updateDevEndpointRequest) {
-        return decoratedAwsGlue.updateDevEndpoint(updateDevEndpointRequest);
-    }
-
-    @Override
-    public UpdateJobResult updateJob(UpdateJobRequest updateJobRequest) {
-        return decoratedAwsGlue.updateJob(updateJobRequest);
-    }
-
-    @Override
-    public UpdateMLTransformResult updateMLTransform(UpdateMLTransformRequest updateMLTransformRequest) {
-        return decoratedAwsGlue.updateMLTransform(updateMLTransformRequest);
-    }
-
-    @Override
-    public UpdatePartitionResult updatePartition(UpdatePartitionRequest updatePartitionRequest) {
-        return decoratedAwsGlue.updatePartition(updatePartitionRequest);
-    }
-
-    @Override
-    public UpdateRegistryResult updateRegistry(UpdateRegistryRequest updateRegistryRequest) {
-        return null;
-    }
-
-    @Override
-    public UpdateSchemaResult updateSchema(UpdateSchemaRequest updateSchemaRequest) {
-        return null;
-    }
-
-    @Override
-    public UpdateTableResult updateTable(UpdateTableRequest updateTableRequest) {
-        return decoratedAwsGlue.updateTable(updateTableRequest);
-    }
-
-    @Override
-    public UpdateTriggerResult updateTrigger(UpdateTriggerRequest updateTriggerRequest) {
-        return decoratedAwsGlue.updateTrigger(updateTriggerRequest);
-    }
-
-    @Override
-    public UpdateUserDefinedFunctionResult updateUserDefinedFunction(UpdateUserDefinedFunctionRequest updateUserDefinedFunctionRequest) {
-        return decoratedAwsGlue.updateUserDefinedFunction(updateUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public UpdateWorkflowResult updateWorkflow(UpdateWorkflowRequest updateWorkflowRequest) {
-        return decoratedAwsGlue.updateWorkflow(updateWorkflowRequest);
-    }
-
-    @Override
-    public void shutdown() {
-        decoratedAwsGlue.shutdown();
-    }
-
-    @Override
-    public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest amazonWebServiceRequest) {
-        return decoratedAwsGlue.getCachedResponseMetadata(amazonWebServiceRequest);
-    }
-
-
-    @Override
-    public UpdateColumnStatisticsForTableResult updateColumnStatisticsForTable(UpdateColumnStatisticsForTableRequest updateColumnStatisticsForTableRequest) {
-        return decoratedAwsGlue.updateColumnStatisticsForTable(updateColumnStatisticsForTableRequest);
-    }
-
-    @Override
-    public UpdateColumnStatisticsForPartitionResult updateColumnStatisticsForPartition(UpdateColumnStatisticsForPartitionRequest updateColumnStatisticsForPartitionRequest) {
-        return decoratedAwsGlue.updateColumnStatisticsForPartition(updateColumnStatisticsForPartitionRequest);
-    }
-
-    @Override
-    public ResumeWorkflowRunResult resumeWorkflowRun(ResumeWorkflowRunRequest resumeWorkflowRunRequest) {
-        return decoratedAwsGlue.resumeWorkflowRun(resumeWorkflowRunRequest);
-    }
-
-    @Override
-    public RunStatementResult runStatement(RunStatementRequest runStatementRequest) {
-        return decoratedAwsGlue.runStatement(runStatementRequest);
-    }
-
-    @Override
-    public GetResourcePoliciesResult getResourcePolicies(GetResourcePoliciesRequest getResourcePoliciesRequest) {
-        return decoratedAwsGlue.getResourcePolicies(getResourcePoliciesRequest);
-    }
-
-    @Override
-    public GetColumnStatisticsForTableResult getColumnStatisticsForTable(GetColumnStatisticsForTableRequest getColumnStatisticsForTableRequest) {
-        return decoratedAwsGlue.getColumnStatisticsForTable(getColumnStatisticsForTableRequest);
-    }
-
-    @Override
-    public GetColumnStatisticsForPartitionResult getColumnStatisticsForPartition(GetColumnStatisticsForPartitionRequest getColumnStatisticsForPartitionRequest) {
-        return decoratedAwsGlue.getColumnStatisticsForPartition(getColumnStatisticsForPartitionRequest);
-    }
-
-    @Override
-    public DeleteColumnStatisticsForTableResult deleteColumnStatisticsForTable(DeleteColumnStatisticsForTableRequest deleteColumnStatisticsForTableRequest) {
-        return decoratedAwsGlue.deleteColumnStatisticsForTable(deleteColumnStatisticsForTableRequest);
-    }
-
-    @Override
-    public DeleteColumnStatisticsForPartitionResult deleteColumnStatisticsForPartition(DeleteColumnStatisticsForPartitionRequest deleteColumnStatisticsForPartitionRequest) {
-        return decoratedAwsGlue.deleteColumnStatisticsForPartition(deleteColumnStatisticsForPartitionRequest);
-    }
-
-    @Override
-    public BatchGetBlueprintsResult batchGetBlueprints(BatchGetBlueprintsRequest batchGetBlueprintsRequest) {
-        return decoratedAwsGlue.batchGetBlueprints(batchGetBlueprintsRequest);
-    }
-}
-
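The class deleted above (AWSGlueDecoratorBase) is a pure pass-through decorator: every AWSGlue call is forwarded to decoratedAwsGlue, except the schema-registry, blueprint, and partition-index operations, which simply return null and are effectively unsupported. A minimal sketch of the same pattern, using a toy interface instead of the real AWSGlue client (all names below are illustrative, not part of the deleted code):

    // Toy stand-in for AWSGlue; the real interface has many more operations.
    interface Catalog {
        String getTable(String db, String name);
    }

    // Pass-through decorator: forwards by default, so subclasses override
    // only the calls whose behaviour they want to change.
    class CatalogDecorator implements Catalog {
        private final Catalog delegate;

        CatalogDecorator(Catalog delegate) {
            this.delegate = delegate;
        }

        @Override
        public String getTable(String db, String name) {
            return delegate.getTable(db, name);
        }
    }

Subclasses such as AWSGlueMultipleCatalogDecorator (further down in this commit) rely on exactly this: they override the handful of catalog-aware requests and inherit the forwarding behaviour for everything else.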
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java
deleted file mode 100644
index ec74797bfa..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ColumnStatisticsError;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.DatabaseInput;
-import com.amazonaws.services.glue.model.Partition;
-import com.amazonaws.services.glue.model.PartitionError;
-import com.amazonaws.services.glue.model.PartitionInput;
-import com.amazonaws.services.glue.model.PartitionValueList;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.glue.model.TableInput;
-import com.amazonaws.services.glue.model.UserDefinedFunction;
-import com.amazonaws.services.glue.model.UserDefinedFunctionInput;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.thrift.TException;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * This is the accessor interface for using AWS Glue as a metastore.
- * The generic AWSGlue interface{@link com.amazonaws.services.glue.AWSGlue}
- * has a number of methods that are irrelevant for clients using Glue only
- * as a metastore.
- * Think of this interface as a wrapper over AWSGlue. This additional layer
- * of abstraction achieves the following -
- * a) Hides the non-metastore related operations present in AWSGlue
- * b) Hides away the batching and pagination related limitations of AWSGlue
- */
-public interface AWSGlueMetastore {
-
-    void createDatabase(DatabaseInput databaseInput);
-
-    Database getDatabase(String dbName);
-
-    List<Database> getAllDatabases();
-
-    void updateDatabase(String databaseName, DatabaseInput databaseInput);
-
-    void deleteDatabase(String dbName);
-
-    void createTable(String dbName, TableInput tableInput);
-
-    Table getTable(String dbName, String tableName);
-
-    List<Table> getTables(String dbname, String tablePattern);
-
-    void updateTable(String dbName, TableInput tableInput);
-
-    void updateTable(String dbName, TableInput tableInput, EnvironmentContext environmentContext);
-
-    void deleteTable(String dbName, String tableName);
-
-    Partition getPartition(String dbName, String tableName, List<String> partitionValues);
-
-    List<Partition> getPartitionsByNames(String dbName, String tableName,
-            List<PartitionValueList> partitionsToGet);
-
-    List<Partition> getPartitions(String dbName, String tableName, String expression,
-            long max) throws TException;
-
-    void updatePartition(String dbName, String tableName, List<String> partitionValues,
-            PartitionInput partitionInput);
-
-    void deletePartition(String dbName, String tableName, List<String> partitionValues);
-
-    List<PartitionError> createPartitions(String dbName, String tableName,
-            List<PartitionInput> partitionInputs);
-
-    void createUserDefinedFunction(String dbName, UserDefinedFunctionInput functionInput);
-
-    UserDefinedFunction getUserDefinedFunction(String dbName, String functionName);
-
-    List<UserDefinedFunction> getUserDefinedFunctions(String dbName, String pattern);
-
-    List<UserDefinedFunction> getUserDefinedFunctions(String pattern);
-
-    void deleteUserDefinedFunction(String dbName, String functionName);
-
-    void updateUserDefinedFunction(String dbName, String functionName, UserDefinedFunctionInput functionInput);
-
-    void deletePartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, String colName);
-
-    void deleteTableColumnStatistics(String dbName, String tableName, String colName);
-
-    Map<String, List<ColumnStatistics>> getPartitionColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> partitionValues,
-            List<String> columnNames
-    );
-
-    List<ColumnStatistics> getTableColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> colNames
-    );
-
-    List<ColumnStatisticsError> updatePartitionColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> partitionValues,
-            List<ColumnStatistics> columnStatistics
-    );
-
-    List<ColumnStatisticsError> updateTableColumnStatistics(
-            String dbName,
-            String tableName,
-            List<ColumnStatistics> columnStatistics
-    );
-}
\ No newline at end of file
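
The Javadoc above names the two jobs this interface does: trimming AWSGlue down to metastore operations, and hiding Glue's batching and pagination limits. As a hedged sketch of the second point, an implementation of getAllDatabases has to loop on Glue's continuation token, roughly as below (the wrapper class is hypothetical; the request/result types are the standard AWS SDK v1 Glue ones):

    import com.amazonaws.services.glue.AWSGlue;
    import com.amazonaws.services.glue.model.Database;
    import com.amazonaws.services.glue.model.GetDatabasesRequest;
    import com.amazonaws.services.glue.model.GetDatabasesResult;

    import java.util.ArrayList;
    import java.util.List;

    public final class PaginationSketch {
        // Pages through GetDatabases until the continuation token runs out;
        // callers of AWSGlueMetastore#getAllDatabases never see the paging.
        public static List<Database> getAllDatabases(AWSGlue glue) {
            List<Database> databases = new ArrayList<>();
            String token = null;
            do {
                GetDatabasesResult page = glue.getDatabases(
                        new GetDatabasesRequest().withNextToken(token));
                databases.addAll(page.getDatabaseList());
                token = page.getNextToken();
            } while (token != null);
            return databases;
        }
    }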
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java
deleted file mode 100644
index e8da0056b2..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java
+++ /dev/null
@@ -1,198 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ColumnStatisticsError;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.DatabaseInput;
-import com.amazonaws.services.glue.model.Partition;
-import com.amazonaws.services.glue.model.PartitionError;
-import com.amazonaws.services.glue.model.PartitionInput;
-import com.amazonaws.services.glue.model.PartitionValueList;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.glue.model.TableInput;
-import com.amazonaws.services.glue.model.UserDefinedFunction;
-import com.amazonaws.services.glue.model.UserDefinedFunctionInput;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.thrift.TException;
-
-import java.util.List;
-import java.util.Map;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-public class AWSGlueMetastoreBaseDecorator implements AWSGlueMetastore {
-
-    private final AWSGlueMetastore awsGlueMetastore;
-
-    public AWSGlueMetastoreBaseDecorator(AWSGlueMetastore awsGlueMetastore) {
-        checkNotNull(awsGlueMetastore, "awsGlueMetastore can not be null");
-        this.awsGlueMetastore = awsGlueMetastore;
-    }
-
-    @Override
-    public void createDatabase(DatabaseInput databaseInput) {
-        awsGlueMetastore.createDatabase(databaseInput);
-    }
-
-    @Override
-    public Database getDatabase(String dbName) {
-        return awsGlueMetastore.getDatabase(dbName);
-    }
-
-    @Override
-    public List<Database> getAllDatabases() {
-        return awsGlueMetastore.getAllDatabases();
-    }
-
-    @Override
-    public void updateDatabase(String databaseName, DatabaseInput databaseInput) {
-        awsGlueMetastore.updateDatabase(databaseName, databaseInput);
-    }
-
-    @Override
-    public void deleteDatabase(String dbName) {
-        awsGlueMetastore.deleteDatabase(dbName);
-    }
-
-    @Override
-    public void createTable(String dbName, TableInput tableInput) {
-        awsGlueMetastore.createTable(dbName, tableInput);
-    }
-
-    @Override
-    public Table getTable(String dbName, String tableName) {
-        return awsGlueMetastore.getTable(dbName, tableName);
-    }
-
-    @Override
-    public List<Table> getTables(String dbname, String tablePattern) {
-        return awsGlueMetastore.getTables(dbname, tablePattern);
-    }
-
-    @Override
-    public void updateTable(String dbName, TableInput tableInput) {
-        awsGlueMetastore.updateTable(dbName, tableInput);
-    }
-
-    @Override
-    public void updateTable(String dbName, TableInput tableInput, EnvironmentContext environmentContext) {
-        awsGlueMetastore.updateTable(dbName, tableInput, environmentContext);
-    }
-
-    @Override
-    public void deleteTable(String dbName, String tableName) {
-        awsGlueMetastore.deleteTable(dbName, tableName);
-    }
-
-    @Override
-    public Partition getPartition(String dbName, String tableName, List<String> partitionValues) {
-        return awsGlueMetastore.getPartition(dbName, tableName, partitionValues);
-    }
-
-    @Override
-    public List<Partition> getPartitionsByNames(String dbName, String tableName, List<PartitionValueList> partitionsToGet) {
-        return awsGlueMetastore.getPartitionsByNames(dbName, tableName, partitionsToGet);
-    }
-
-    @Override
-    public List<Partition> getPartitions(String dbName, String tableName, String expression, long max) throws TException {
-        return awsGlueMetastore.getPartitions(dbName, tableName, expression, max);
-    }
-
-    @Override
-    public void updatePartition(String dbName, String tableName, List<String> partitionValues, PartitionInput partitionInput) {
-        awsGlueMetastore.updatePartition(dbName, tableName, partitionValues, partitionInput);
-    }
-
-    @Override
-    public void deletePartition(String dbName, String tableName, List<String> partitionValues) {
-        awsGlueMetastore.deletePartition(dbName, tableName, partitionValues);
-    }
-
-    @Override
-    public List<PartitionError> createPartitions(String dbName, String tableName, List<PartitionInput> partitionInputs) {
-        return awsGlueMetastore.createPartitions(dbName, tableName, partitionInputs);
-    }
-
-    @Override
-    public void createUserDefinedFunction(String dbName, UserDefinedFunctionInput functionInput) {
-        awsGlueMetastore.createUserDefinedFunction(dbName, functionInput);
-    }
-
-    @Override
-    public UserDefinedFunction getUserDefinedFunction(String dbName, String functionName) {
-        return awsGlueMetastore.getUserDefinedFunction(dbName, functionName);
-    }
-
-    @Override
-    public List<UserDefinedFunction> getUserDefinedFunctions(String dbName, String pattern) {
-        return awsGlueMetastore.getUserDefinedFunctions(dbName, pattern);
-    }
-
-    @Override
-    public List<UserDefinedFunction> getUserDefinedFunctions(String pattern) {
-        return awsGlueMetastore.getUserDefinedFunctions(pattern);
-    }
-
-    @Override
-    public void deleteUserDefinedFunction(String dbName, String functionName) {
-        awsGlueMetastore.deleteUserDefinedFunction(dbName, functionName);
-    }
-
-    @Override
-    public void updateUserDefinedFunction(String dbName, String functionName, UserDefinedFunctionInput functionInput) {
-        awsGlueMetastore.updateUserDefinedFunction(dbName, functionName, functionInput);
-    }
-
-    @Override
-    public void deletePartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, String colName) {
-        awsGlueMetastore.deletePartitionColumnStatistics(dbName, tableName, partitionValues, colName);
-    }
-
-    @Override
-    public void deleteTableColumnStatistics(String dbName, String tableName, String colName) {
-        awsGlueMetastore.deleteTableColumnStatistics(dbName, tableName, colName);
-    }
-
-    @Override
-    public Map<String, List<ColumnStatistics>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, List<String> columnNames) {
-        return awsGlueMetastore.getPartitionColumnStatistics(dbName, tableName, partitionValues, columnNames);
-    }
-
-    @Override
-    public List<ColumnStatistics> getTableColumnStatistics(String dbName, String tableName, List<String> colNames) {
-        return awsGlueMetastore.getTableColumnStatistics(dbName, tableName, colNames);
-    }
-
-    @Override
-    public List<ColumnStatisticsError> updatePartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, List<ColumnStatistics> columnStatistics) {
-        return awsGlueMetastore.updatePartitionColumnStatistics(dbName, tableName, partitionValues, columnStatistics);
-    }
-
-    @Override
-    public List<ColumnStatisticsError> updateTableColumnStatistics(String dbName, String tableName, List<ColumnStatistics> columnStatistics) {
-        return awsGlueMetastore.updateTableColumnStatistics(dbName, tableName, columnStatistics);
-    }
-
-}
\ No newline at end of file
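
AWSGlueMetastoreBaseDecorator gives subclasses a forwarding default for every metastore operation, so a concrete decorator only has to override the calls it actually intercepts (the cache decorator below overrides just getDatabase and getTable). A hypothetical subclass, for illustration only:

    import com.amazonaws.services.glue.model.Table;

    // Made-up example: time one hot call, delegate everything else untouched.
    public class TimingGlueMetastore extends AWSGlueMetastoreBaseDecorator {

        public TimingGlueMetastore(AWSGlueMetastore delegate) {
            super(delegate);
        }

        @Override
        public Table getTable(String dbName, String tableName) {
            long start = System.nanoTime();
            try {
                return super.getTable(dbName, tableName);
            } finally {
                System.out.printf("getTable(%s.%s) took %d us%n",
                        dbName, tableName, (System.nanoTime() - start) / 1_000);
            }
        }
    }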
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java
deleted file mode 100644
index 7ef0280e15..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java
+++ /dev/null
@@ -1,185 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.Table;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.log4j.Logger;
-
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_DB_CACHE_ENABLE;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_DB_CACHE_SIZE;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_DB_CACHE_TTL_MINS;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_TABLE_CACHE_ENABLE;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_TABLE_CACHE_SIZE;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_TABLE_CACHE_TTL_MINS;
-
-import java.util.Objects;
-import java.util.concurrent.TimeUnit;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-public class AWSGlueMetastoreCacheDecorator extends AWSGlueMetastoreBaseDecorator {
-
-    private static final Logger logger = Logger.getLogger(AWSGlueMetastoreCacheDecorator.class);
-
-    private final Configuration conf;
-
-    private final boolean databaseCacheEnabled;
-
-    private final boolean tableCacheEnabled;
-
-    @VisibleForTesting
-    protected Cache<String, Database> databaseCache;
-    @VisibleForTesting
-    protected Cache<TableIdentifier, Table> tableCache;
-
-    public AWSGlueMetastoreCacheDecorator(Configuration conf, AWSGlueMetastore awsGlueMetastore) {
-        super(awsGlueMetastore);
-
-        checkNotNull(conf, "conf can not be null");
-        this.conf = conf;
-
-        databaseCacheEnabled = conf.getBoolean(AWS_GLUE_DB_CACHE_ENABLE, false);
-        if(databaseCacheEnabled) {
-            int dbCacheSize = conf.getInt(AWS_GLUE_DB_CACHE_SIZE, 0);
-            int dbCacheTtlMins = conf.getInt(AWS_GLUE_DB_CACHE_TTL_MINS, 0);
-
-            //validate config values for size and ttl
-            validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_SIZE, dbCacheSize);
-            validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_TTL_MINS, dbCacheTtlMins);
-
-            //initialize database cache
-            databaseCache = CacheBuilder.newBuilder().maximumSize(dbCacheSize)
-                    .expireAfterWrite(dbCacheTtlMins, TimeUnit.MINUTES).build();
-        } else {
-            databaseCache = null;
-        }
-
-        tableCacheEnabled = conf.getBoolean(AWS_GLUE_TABLE_CACHE_ENABLE, false);
-        if(tableCacheEnabled) {
-            int tableCacheSize = conf.getInt(AWS_GLUE_TABLE_CACHE_SIZE, 0);
-            int tableCacheTtlMins = conf.getInt(AWS_GLUE_TABLE_CACHE_TTL_MINS, 0);
-
-            //validate config values for size and ttl
-            validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_SIZE, tableCacheSize);
-            validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_TTL_MINS, tableCacheTtlMins);
-
-            //initialize table cache
-            tableCache = CacheBuilder.newBuilder().maximumSize(tableCacheSize)
-                    .expireAfterWrite(tableCacheTtlMins, TimeUnit.MINUTES).build();
-        } else {
-            tableCache = null;
-        }
-
-        logger.info("Constructed");
-    }
-
-    private void validateConfigValueIsGreaterThanZero(String configName, int value) {
-        checkArgument(value > 0, String.format("Invalid value for Hive Config %s. " +
-                "Provide a value greater than zero", configName));
-
-    }
-
-    @Override
-    public Database getDatabase(String dbName) {
-        Database result;
-        if(databaseCacheEnabled) {
-            Database valueFromCache = databaseCache.getIfPresent(dbName);
-            if(valueFromCache != null) {
-                logger.info("Cache hit for operation [getDatabase] on key [" + dbName + "]");
-                result = valueFromCache;
-            } else {
-                logger.info("Cache miss for operation [getDatabase] on key [" + dbName + "]");
-                result = super.getDatabase(dbName);
-                databaseCache.put(dbName, result);
-            }
-        } else {
-            result = super.getDatabase(dbName);
-        }
-        return result;
-    }
-
-    @Override
-    public Table getTable(String dbName, String tableName) {
-        Table result;
-        if(tableCacheEnabled) {
-            TableIdentifier key = new TableIdentifier(dbName, tableName);
-            Table valueFromCache = tableCache.getIfPresent(key);
-            if(valueFromCache != null) {
-                logger.info("Cache hit for operation [getTable] on key [" + key + "]");
-                result = valueFromCache;
-            } else {
-                logger.info("Cache miss for operation [getTable] on key [" + key + "]");
-                result = super.getTable(dbName, tableName);
-                tableCache.put(key, result);
-            }
-        } else {
-            result = super.getTable(dbName, tableName);
-        }
-        return result;
-    }
-
-    static class TableIdentifier {
-        private final String dbName;
-        private final String tableName;
-
-        public TableIdentifier(String dbName, String tableName) {
-            this.dbName = dbName;
-            this.tableName = tableName;
-        }
-
-        public String getDbName() {
-            return dbName;
-        }
-
-        public String getTableName() {
-            return tableName;
-        }
-
-        @Override
-        public String toString() {
-            return "TableIdentifier{" +
-                    "dbName='" + dbName + '\'' +
-                    ", tableName='" + tableName + '\'' +
-                    '}';
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-            TableIdentifier that = (TableIdentifier) o;
-            return Objects.equals(dbName, that.dbName) &&
-                    Objects.equals(tableName, that.tableName);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(dbName, tableName);
-        }
-    }
-}
\ No newline at end of file
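
The cache decorator above builds two independent Guava caches, each bounded by size and expired by a per-entry TTL, with the read-through done by hand in getDatabase and getTable (getIfPresent, then fetch and put on a miss). A standalone sketch of that Guava idiom, with placeholder keys and values:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    import java.util.concurrent.TimeUnit;

    public final class CacheSketch {
        public static void main(String[] args) {
            // Size and TTL mirror the AWS_GLUE_*_CACHE_SIZE / _TTL_MINS settings.
            Cache<String, String> cache = CacheBuilder.newBuilder()
                    .maximumSize(100)
                    .expireAfterWrite(10, TimeUnit.MINUTES)
                    .build();

            String db = cache.getIfPresent("sales");
            if (db == null) {                  // miss: fetch, then populate
                db = "fetched-from-glue";      // stand-in for super.getDatabase(...)
                cache.put("sales", db);
            }
            System.out.println(db);
        }
    }

Note that this copy of the decorator intercepts only the two read paths; updates and deletes pass straight through without invalidating the caches, so a cached entry can serve stale metadata until its TTL lapses.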
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java
deleted file mode 100644
index ad0353d096..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.AWSGlue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_DB_CACHE_ENABLE;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_TABLE_CACHE_ENABLE;
-
-public class AWSGlueMetastoreFactory {
-
-    public AWSGlueMetastore newMetastore(Configuration conf) throws MetaException {
-        AWSGlue glueClient = new AWSGlueClientFactory(conf).newClient();
-        AWSGlueMetastore defaultMetastore = new DefaultAWSGlueMetastore(conf, glueClient);
-        if(isCacheEnabled(conf)) {
-            return new AWSGlueMetastoreCacheDecorator(conf, defaultMetastore);
-        }
-        return defaultMetastore;
-    }
-
-    private boolean isCacheEnabled(Configuration conf) {
-        boolean databaseCacheEnabled = conf.getBoolean(AWS_GLUE_DB_CACHE_ENABLE, false);
-        boolean tableCacheEnabled = conf.getBoolean(AWS_GLUE_TABLE_CACHE_ENABLE, false);
-        return (databaseCacheEnabled || tableCacheEnabled);
-    }
-}
\ No newline at end of file
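
The factory is the single place where the cache decorator gets wired in: it builds the Glue client, wraps it in DefaultAWSGlueMetastore, and adds AWSGlueMetastoreCacheDecorator only if at least one cache flag is set. A hypothetical call site:

    import com.amazonaws.glue.catalog.metastore.AWSGlueMetastore;
    import com.amazonaws.glue.catalog.metastore.AWSGlueMetastoreFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public final class FactorySketch {
        public static void main(String[] args) throws MetaException {
            Configuration conf = new Configuration();
            // With both *_CACHE_ENABLE flags unset this returns the plain
            // DefaultAWSGlueMetastore; enabling either flag wraps it in the
            // cache decorator.
            AWSGlueMetastore metastore =
                    new AWSGlueMetastoreFactory().newMetastore(conf);
            System.out.println(metastore.getClass().getSimpleName());
        }
    }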
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMultipleCatalogDecorator.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMultipleCatalogDecorator.java
deleted file mode 100644
index c94472260d..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMultipleCatalogDecorator.java
+++ /dev/null
@@ -1,370 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.BatchCreatePartitionRequest;
-import com.amazonaws.services.glue.model.BatchCreatePartitionResult;
-import com.amazonaws.services.glue.model.BatchDeletePartitionRequest;
-import com.amazonaws.services.glue.model.BatchDeletePartitionResult;
-import com.amazonaws.services.glue.model.BatchDeleteTableRequest;
-import com.amazonaws.services.glue.model.BatchDeleteTableResult;
-import com.amazonaws.services.glue.model.BatchGetPartitionRequest;
-import com.amazonaws.services.glue.model.BatchGetPartitionResult;
-import com.amazonaws.services.glue.model.CreateDatabaseRequest;
-import com.amazonaws.services.glue.model.CreateDatabaseResult;
-import com.amazonaws.services.glue.model.CreatePartitionRequest;
-import com.amazonaws.services.glue.model.CreatePartitionResult;
-import com.amazonaws.services.glue.model.CreateTableRequest;
-import com.amazonaws.services.glue.model.CreateTableResult;
-import com.amazonaws.services.glue.model.CreateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.CreateUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.DeleteDatabaseRequest;
-import com.amazonaws.services.glue.model.DeleteDatabaseResult;
-import com.amazonaws.services.glue.model.DeletePartitionRequest;
-import com.amazonaws.services.glue.model.DeletePartitionResult;
-import com.amazonaws.services.glue.model.DeleteTableRequest;
-import com.amazonaws.services.glue.model.DeleteTableResult;
-import com.amazonaws.services.glue.model.DeleteUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.DeleteUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.GetDatabaseRequest;
-import com.amazonaws.services.glue.model.GetDatabaseResult;
-import com.amazonaws.services.glue.model.GetPartitionRequest;
-import com.amazonaws.services.glue.model.GetPartitionResult;
-import com.amazonaws.services.glue.model.GetPartitionsRequest;
-import com.amazonaws.services.glue.model.GetPartitionsResult;
-import com.amazonaws.services.glue.model.GetTableRequest;
-import com.amazonaws.services.glue.model.GetTableResult;
-import com.amazonaws.services.glue.model.GetTableVersionsRequest;
-import com.amazonaws.services.glue.model.GetTableVersionsResult;
-import com.amazonaws.services.glue.model.GetTablesRequest;
-import com.amazonaws.services.glue.model.GetTablesResult;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionResult;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsResult;
-import com.amazonaws.services.glue.model.UpdateDatabaseRequest;
-import com.amazonaws.services.glue.model.UpdateDatabaseResult;
-import com.amazonaws.services.glue.model.UpdatePartitionRequest;
-import com.amazonaws.services.glue.model.UpdatePartitionResult;
-import com.amazonaws.services.glue.model.UpdateTableRequest;
-import com.amazonaws.services.glue.model.UpdateTableResult;
-import com.amazonaws.services.glue.model.UpdateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.UpdateUserDefinedFunctionResult;
-import com.google.common.base.Strings;
-
-import java.util.function.Consumer;
-import java.util.function.Supplier;
-
-
-public class AWSGlueMultipleCatalogDecorator extends AWSGlueDecoratorBase {
-
-    // We're not importing this from Hive's Warehouse class, as the package name changed between Hive 1.x and Hive 3.x
-    private static final String DEFAULT_DATABASE_NAME = "default";
-
-    private String catalogSeparator;
-
-    public AWSGlueMultipleCatalogDecorator(AWSGlue awsGlueToBeDecorated, String catalogSeparator) {
-        super(awsGlueToBeDecorated);
-        this.catalogSeparator = catalogSeparator;
-    }
-
-    private void configureRequest(Supplier<String> getDatabaseFunc,
-            Consumer<String> setDatabaseFunc,
-            Consumer<String> setCatalogFunc) {
-        if (!Strings.isNullOrEmpty(this.catalogSeparator) && (getDatabaseFunc.get() != null)
-                && !getDatabaseFunc.get().equals(DEFAULT_DATABASE_NAME)) {
-            String databaseName = getDatabaseFunc.get();
-            int idx = databaseName.indexOf(this.catalogSeparator);
-            if (idx >= 0) {
-                setCatalogFunc.accept(databaseName.substring(0, idx));
-                setDatabaseFunc.accept(databaseName.substring(idx + this.catalogSeparator.length()));
-            }
-        }
-    }
-
-    @Override
-    public BatchCreatePartitionResult batchCreatePartition(BatchCreatePartitionRequest batchCreatePartitionRequest) {
-        configureRequest(
-                batchCreatePartitionRequest::getDatabaseName,
-                batchCreatePartitionRequest::setDatabaseName,
-                batchCreatePartitionRequest::setCatalogId
-        );
-        return super.batchCreatePartition(batchCreatePartitionRequest);
-    }
-
-    @Override
-    public BatchDeletePartitionResult batchDeletePartition(BatchDeletePartitionRequest batchDeletePartitionRequest) {
-        configureRequest(
-                batchDeletePartitionRequest::getDatabaseName,
-                batchDeletePartitionRequest::setDatabaseName,
-                batchDeletePartitionRequest::setCatalogId
-        );
-        return super.batchDeletePartition(batchDeletePartitionRequest);
-    }
-
-    @Override
-    public BatchDeleteTableResult batchDeleteTable(BatchDeleteTableRequest batchDeleteTableRequest) {
-        configureRequest(
-                batchDeleteTableRequest::getDatabaseName,
-                batchDeleteTableRequest::setDatabaseName,
-                batchDeleteTableRequest::setCatalogId
-        );
-        return super.batchDeleteTable(batchDeleteTableRequest);
-    }
-
-    @Override
-    public BatchGetPartitionResult batchGetPartition(BatchGetPartitionRequest batchGetPartitionRequest) {
-        String originalDatabaseName = batchGetPartitionRequest.getDatabaseName();
-        configureRequest(
-                batchGetPartitionRequest::getDatabaseName,
-                batchGetPartitionRequest::setDatabaseName,
-                batchGetPartitionRequest::setCatalogId
-        );
-        BatchGetPartitionResult result = super.batchGetPartition(batchGetPartitionRequest);
-        result.getPartitions().forEach(partition -> partition.setDatabaseName(originalDatabaseName));
-        return result;
-    }
-
-    @Override
-    public CreateDatabaseResult createDatabase(CreateDatabaseRequest createDatabaseRequest) {
-        configureRequest(
-                () -> createDatabaseRequest.getDatabaseInput().getName(),
-                name -> createDatabaseRequest.getDatabaseInput().setName(name),
-                createDatabaseRequest::setCatalogId
-        );
-        return super.createDatabase(createDatabaseRequest);
-    }
-
-    @Override
-    public CreatePartitionResult createPartition(CreatePartitionRequest createPartitionRequest) {
-        configureRequest(
-                createPartitionRequest::getDatabaseName,
-                createPartitionRequest::setDatabaseName,
-                createPartitionRequest::setCatalogId
-        );
-        return super.createPartition(createPartitionRequest);
-    }
-
-    @Override
-    public CreateTableResult createTable(CreateTableRequest createTableRequest) {
-        configureRequest(
-                createTableRequest::getDatabaseName,
-                createTableRequest::setDatabaseName,
-                createTableRequest::setCatalogId
-        );
-        return super.createTable(createTableRequest);
-    }
-
-    @Override
-    public CreateUserDefinedFunctionResult createUserDefinedFunction(CreateUserDefinedFunctionRequest createUserDefinedFunctionRequest) {
-        configureRequest(
-                createUserDefinedFunctionRequest::getDatabaseName,
-                createUserDefinedFunctionRequest::setDatabaseName,
-                createUserDefinedFunctionRequest::setCatalogId
-        );
-        return super.createUserDefinedFunction(createUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public DeleteDatabaseResult deleteDatabase(DeleteDatabaseRequest deleteDatabaseRequest) {
-        configureRequest(
-                deleteDatabaseRequest::getName,
-                deleteDatabaseRequest::setName,
-                deleteDatabaseRequest::setCatalogId
-        );
-        return super.deleteDatabase(deleteDatabaseRequest);
-    }
-
-    @Override
-    public DeletePartitionResult deletePartition(DeletePartitionRequest deletePartitionRequest) {
-        configureRequest(
-                deletePartitionRequest::getDatabaseName,
-                deletePartitionRequest::setDatabaseName,
-                deletePartitionRequest::setCatalogId
-        );
-        return super.deletePartition(deletePartitionRequest);
-    }
-
-    @Override
-    public DeleteTableResult deleteTable(DeleteTableRequest deleteTableRequest) {
-        configureRequest(
-                deleteTableRequest::getDatabaseName,
-                deleteTableRequest::setDatabaseName,
-                deleteTableRequest::setCatalogId
-        );
-        return super.deleteTable(deleteTableRequest);
-    }
-
-    @Override
-    public DeleteUserDefinedFunctionResult deleteUserDefinedFunction(DeleteUserDefinedFunctionRequest deleteUserDefinedFunctionRequest) {
-        configureRequest(
-                deleteUserDefinedFunctionRequest::getDatabaseName,
-                deleteUserDefinedFunctionRequest::setDatabaseName,
-                deleteUserDefinedFunctionRequest::setCatalogId
-        );
-        return super.deleteUserDefinedFunction(deleteUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public GetDatabaseResult getDatabase(GetDatabaseRequest getDatabaseRequest) {
-        String originalDatabaseName = getDatabaseRequest.getName();
-        configureRequest(
-                getDatabaseRequest::getName,
-                getDatabaseRequest::setName,
-                getDatabaseRequest::setCatalogId
-        );
-        GetDatabaseResult result = super.getDatabase(getDatabaseRequest);
-        result.getDatabase().setName(originalDatabaseName);
-        return result;
-    }
-
-    @Override
-    public GetPartitionResult getPartition(GetPartitionRequest getPartitionRequest) {
-        String originalDatabaseName = getPartitionRequest.getDatabaseName();
-        configureRequest(
-                getPartitionRequest::getDatabaseName,
-                getPartitionRequest::setDatabaseName,
-                getPartitionRequest::setCatalogId
-        );
-        GetPartitionResult result = super.getPartition(getPartitionRequest);
-        result.getPartition().setDatabaseName(originalDatabaseName);
-        return result;
-    }
-
-    @Override
-    public GetPartitionsResult getPartitions(GetPartitionsRequest getPartitionsRequest) {
-        String originalDatabaseName = getPartitionsRequest.getDatabaseName();
-        configureRequest(
-                getPartitionsRequest::getDatabaseName,
-                getPartitionsRequest::setDatabaseName,
-                getPartitionsRequest::setCatalogId
-        );
-        GetPartitionsResult result = super.getPartitions(getPartitionsRequest);
-        result.getPartitions().forEach(partition -> partition.setDatabaseName(originalDatabaseName));
-        return result;
-    }
-
-    @Override
-    public GetTableResult getTable(GetTableRequest getTableRequest) {
-        String originalDatabaseName = getTableRequest.getDatabaseName();
-        configureRequest(
-                getTableRequest::getDatabaseName,
-                getTableRequest::setDatabaseName,
-                getTableRequest::setCatalogId
-        );
-        GetTableResult result = super.getTable(getTableRequest);
-        result.getTable().setDatabaseName(originalDatabaseName);
-        return result;
-    }
-
-    @Override
-    public GetTableVersionsResult getTableVersions(GetTableVersionsRequest getTableVersionsRequest) {
-        String originalDatabaseName = getTableVersionsRequest.getDatabaseName();
-        configureRequest(
-                getTableVersionsRequest::getDatabaseName,
-                getTableVersionsRequest::setDatabaseName,
-                getTableVersionsRequest::setCatalogId
-        );
-        GetTableVersionsResult result = super.getTableVersions(getTableVersionsRequest);
-        result.getTableVersions().forEach(tableVersion -> tableVersion.getTable().setDatabaseName(originalDatabaseName));
-        return result;
-    }
-
-    @Override
-    public GetTablesResult getTables(GetTablesRequest getTablesRequest) {
-        String originalDatabaseName = getTablesRequest.getDatabaseName();
-        configureRequest(
-                getTablesRequest::getDatabaseName,
-                getTablesRequest::setDatabaseName,
-                getTablesRequest::setCatalogId
-        );
-        GetTablesResult result = super.getTables(getTablesRequest);
-        result.getTableList().forEach(table -> table.setDatabaseName(originalDatabaseName));
-        return result;
-    }
-
-    @Override
-    public GetUserDefinedFunctionResult getUserDefinedFunction(GetUserDefinedFunctionRequest getUserDefinedFunctionRequest) {
-        configureRequest(
-                getUserDefinedFunctionRequest::getDatabaseName,
-                getUserDefinedFunctionRequest::setDatabaseName,
-                getUserDefinedFunctionRequest::setCatalogId
-        );
-        return super.getUserDefinedFunction(getUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public GetUserDefinedFunctionsResult getUserDefinedFunctions(GetUserDefinedFunctionsRequest getUserDefinedFunctionsRequest) {
-        configureRequest(
-                getUserDefinedFunctionsRequest::getDatabaseName,
-                getUserDefinedFunctionsRequest::setDatabaseName,
-                getUserDefinedFunctionsRequest::setCatalogId
-        );
-        return super.getUserDefinedFunctions(getUserDefinedFunctionsRequest);
-    }
-
-    @Override
-    public UpdateDatabaseResult updateDatabase(UpdateDatabaseRequest updateDatabaseRequest) {
-        configureRequest(
-                updateDatabaseRequest::getName,
-                updateDatabaseRequest::setName,
-                updateDatabaseRequest::setCatalogId
-        );
-        configureRequest(
-                () -> updateDatabaseRequest.getDatabaseInput().getName(),
-                name -> updateDatabaseRequest.getDatabaseInput().setName(name),
-                catalogId -> {}
-        );
-        return super.updateDatabase(updateDatabaseRequest);
-    }
-
-    @Override
-    public UpdatePartitionResult updatePartition(UpdatePartitionRequest updatePartitionRequest) {
-        configureRequest(
-                updatePartitionRequest::getDatabaseName,
-                updatePartitionRequest::setDatabaseName,
-                updatePartitionRequest::setCatalogId
-        );
-        return super.updatePartition(updatePartitionRequest);
-    }
-
-    @Override
-    public UpdateTableResult updateTable(UpdateTableRequest updateTableRequest) {
-        configureRequest(
-                updateTableRequest::getDatabaseName,
-                updateTableRequest::setDatabaseName,
-                updateTableRequest::setCatalogId
-        );
-        return super.updateTable(updateTableRequest);
-    }
-
-    @Override
-    public UpdateUserDefinedFunctionResult updateUserDefinedFunction(UpdateUserDefinedFunctionRequest updateUserDefinedFunctionRequest) {
-        configureRequest(
-                updateUserDefinedFunctionRequest::getDatabaseName,
-                updateUserDefinedFunctionRequest::setDatabaseName,
-                updateUserDefinedFunctionRequest::setCatalogId
-        );
-        return super.updateUserDefinedFunction(updateUserDefinedFunctionRequest);
-    }
-}
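
Context for the decorator above: configureRequest() splits a qualified database name into a
catalog id and a bare database name. A hedged sketch of that split, assuming a separator of
"/" (the real separator is whatever string the decorator was constructed with):

    // "prodCatalog/sales" -> catalogId "prodCatalog", database "sales".
    // "default", null, and names without the separator pass through unchanged.
    String dbName = "prodCatalog/sales";
    int idx = dbName.indexOf("/");
    String catalogId = dbName.substring(0, idx);             // "prodCatalog"
    String database = dbName.substring(idx + "/".length());  // "sales"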
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSCredentialsProviderFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSCredentialsProviderFactory.java
deleted file mode 100644
index 2f87efa38a..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSCredentialsProviderFactory.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
-
-public class DefaultAWSCredentialsProviderFactory implements
-        AWSCredentialsProviderFactory {
-
-    @Override
-    public AWSCredentialsProvider buildAWSCredentialsProvider(Configuration conf) {
-        return new DefaultAWSCredentialsProviderChain();
-    }
-
-}
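
Context: DefaultAWSCredentialsProviderChain resolves credentials in the standard AWS SDK v1
order (environment variables, Java system properties, the shared credentials file, then
container/instance profile credentials). A hedged sketch of exercising the factory directly:

    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderFactory()
            .buildAWSCredentialsProvider(new Configuration());
    provider.getCredentials(); // resolved lazily, via the chain, at call time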
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java
deleted file mode 100644
index 7569139251..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java
+++ /dev/null
@@ -1,662 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.glue.catalog.converters.PartitionNameParser;
-import com.amazonaws.glue.catalog.util.MetastoreClientUtils;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.BatchCreatePartitionRequest;
-import com.amazonaws.services.glue.model.BatchGetPartitionRequest;
-import com.amazonaws.services.glue.model.BatchGetPartitionResult;
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ColumnStatisticsError;
-import com.amazonaws.services.glue.model.CreateDatabaseRequest;
-import com.amazonaws.services.glue.model.CreateTableRequest;
-import com.amazonaws.services.glue.model.CreateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.DatabaseInput;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.DeleteColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.DeleteDatabaseRequest;
-import com.amazonaws.services.glue.model.DeletePartitionRequest;
-import com.amazonaws.services.glue.model.DeleteTableRequest;
-import com.amazonaws.services.glue.model.DeleteUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForPartitionResult;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.GetColumnStatisticsForTableResult;
-import com.amazonaws.services.glue.model.GetDatabaseRequest;
-import com.amazonaws.services.glue.model.GetDatabaseResult;
-import com.amazonaws.services.glue.model.GetDatabasesRequest;
-import com.amazonaws.services.glue.model.GetDatabasesResult;
-import com.amazonaws.services.glue.model.GetPartitionRequest;
-import com.amazonaws.services.glue.model.GetPartitionsRequest;
-import com.amazonaws.services.glue.model.GetPartitionsResult;
-import com.amazonaws.services.glue.model.GetTableRequest;
-import com.amazonaws.services.glue.model.GetTableResult;
-import com.amazonaws.services.glue.model.GetTablesRequest;
-import com.amazonaws.services.glue.model.GetTablesResult;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsRequest;
-import com.amazonaws.services.glue.model.GetUserDefinedFunctionsResult;
-import com.amazonaws.services.glue.model.Partition;
-import com.amazonaws.services.glue.model.PartitionError;
-import com.amazonaws.services.glue.model.PartitionInput;
-import com.amazonaws.services.glue.model.PartitionValueList;
-import com.amazonaws.services.glue.model.Segment;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.glue.model.TableInput;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForPartitionRequest;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForPartitionResult;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForTableRequest;
-import com.amazonaws.services.glue.model.UpdateColumnStatisticsForTableResult;
-import com.amazonaws.services.glue.model.UpdateDatabaseRequest;
-import com.amazonaws.services.glue.model.UpdatePartitionRequest;
-import com.amazonaws.services.glue.model.UpdateTableRequest;
-import com.amazonaws.services.glue.model.UpdateUserDefinedFunctionRequest;
-import com.amazonaws.services.glue.model.UserDefinedFunction;
-import com.amazonaws.services.glue.model.UserDefinedFunctionInput;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-import com.google.common.base.Throwables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.thrift.TException;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-public class DefaultAWSGlueMetastore implements AWSGlueMetastore {
-
-    public static final int BATCH_GET_PARTITIONS_MAX_REQUEST_SIZE = 1000;
-    /**
-     * Based on the maxResults parameter at https://docs.aws.amazon.com/glue/latest/webapi/API_GetPartitions.html
-     */
-    public static final int GET_PARTITIONS_MAX_SIZE = 1000;
-    /**
-     * Maximum number of Glue Segments. A segment defines a non-overlapping region of a table's partitions,
-     * allowing multiple requests to be executed in parallel.
-     */
-    public static final int DEFAULT_NUM_PARTITION_SEGMENTS = 5;
-    /**
-     * Currently the upper limit allowed by Glue is 10.
-     * https://docs.aws.amazon.com/glue/latest/webapi/API_Segment.html
-     */
-    public static final int MAX_NUM_PARTITION_SEGMENTS = 10;
-    public static final String NUM_PARTITION_SEGMENTS_CONF = "aws.glue.partition.num.segments";
-    public static final String CUSTOM_EXECUTOR_FACTORY_CONF = "hive.metastore.executorservice.factory.class";
-
-    /**
-     * Based on the ColumnNames parameter at https://docs.aws.amazon.com/glue/latest/webapi/API_GetColumnStatisticsForPartition.html
-     */
-    public static final int GET_COLUMNS_STAT_MAX_SIZE = 100;
-    public static final int UPDATE_COLUMNS_STAT_MAX_SIZE = 25;
-
-    /**
-     * To be used with UpdateTable
-     */
-    public static final String SKIP_AWS_GLUE_ARCHIVE = "skipAWSGlueArchive";
-
-    private static final int NUM_EXECUTOR_THREADS = 5;
-    static final String GLUE_METASTORE_DELEGATE_THREADPOOL_NAME_FORMAT = "glue-metastore-delegate-%d";
-    private static final ExecutorService GLUE_METASTORE_DELEGATE_THREAD_POOL = Executors.newFixedThreadPool(
-            NUM_EXECUTOR_THREADS,
-            new ThreadFactoryBuilder()
-                    .setNameFormat(GLUE_METASTORE_DELEGATE_THREADPOOL_NAME_FORMAT)
-                    .setDaemon(true).build()
-    );
-
-    private final Configuration conf;
-    private final AWSGlue glueClient;
-    private final String catalogId;
-    private final ExecutorService executorService;
-    private final int numPartitionSegments;
-
-    protected ExecutorService getExecutorService(Configuration conf) {
-        Class<? extends ExecutorServiceFactory> executorFactoryClass = conf
-                .getClass(CUSTOM_EXECUTOR_FACTORY_CONF,
-                        DefaultExecutorServiceFactory.class).asSubclass(
-                        ExecutorServiceFactory.class);
-        ExecutorServiceFactory factory = ReflectionUtils.newInstance(
-                executorFactoryClass, conf);
-        return factory.getExecutorService(conf);
-    }
-
-    public DefaultAWSGlueMetastore(Configuration conf, AWSGlue glueClient) {
-        checkNotNull(conf, "Hive Config cannot be null");
-        checkNotNull(glueClient, "glueClient cannot be null");
-        this.numPartitionSegments = conf.getInt(NUM_PARTITION_SEGMENTS_CONF, DEFAULT_NUM_PARTITION_SEGMENTS);
-        checkArgument(numPartitionSegments <= MAX_NUM_PARTITION_SEGMENTS,
-                String.format("Hive Config [%s] can't exceed %d", NUM_PARTITION_SEGMENTS_CONF, MAX_NUM_PARTITION_SEGMENTS));
-        this.conf = conf;
-        this.glueClient = glueClient;
-        this.catalogId = MetastoreClientUtils.getCatalogId(conf);
-        this.executorService = getExecutorService(conf);
-    }
-
-    // ======================= Database =======================
-
-    @Override
-    public void createDatabase(DatabaseInput databaseInput) {
-        CreateDatabaseRequest createDatabaseRequest = new CreateDatabaseRequest().withDatabaseInput(databaseInput)
-                .withCatalogId(catalogId);
-        glueClient.createDatabase(createDatabaseRequest);
-    }
-
-    @Override
-    public Database getDatabase(String dbName) {
-        GetDatabaseRequest getDatabaseRequest = new GetDatabaseRequest().withCatalogId(catalogId).withName(dbName);
-        GetDatabaseResult result = glueClient.getDatabase(getDatabaseRequest);
-        return result.getDatabase();
-    }
-
-    @Override
-    public List<Database> getAllDatabases() {
-        List<Database> ret = Lists.newArrayList();
-        String nextToken = null;
-        do {
-            GetDatabasesRequest getDatabasesRequest = new GetDatabasesRequest().withNextToken(nextToken).withCatalogId(
-                    catalogId);
-            GetDatabasesResult result = glueClient.getDatabases(getDatabasesRequest);
-            nextToken = result.getNextToken();
-            ret.addAll(result.getDatabaseList());
-        } while (nextToken != null);
-        return ret;
-    }
-
-    @Override
-    public void updateDatabase(String databaseName, DatabaseInput databaseInput) {
-        UpdateDatabaseRequest updateDatabaseRequest = new UpdateDatabaseRequest().withName(databaseName)
-                .withDatabaseInput(databaseInput).withCatalogId(catalogId);
-        glueClient.updateDatabase(updateDatabaseRequest);
-    }
-
-    @Override
-    public void deleteDatabase(String dbName) {
-        DeleteDatabaseRequest deleteDatabaseRequest = new DeleteDatabaseRequest().withName(dbName).withCatalogId(
-                catalogId);
-        glueClient.deleteDatabase(deleteDatabaseRequest);
-    }
-
-    // ======================== Table ========================
-
-    @Override
-    public void createTable(String dbName, TableInput tableInput) {
-        CreateTableRequest createTableRequest = new CreateTableRequest().withTableInput(tableInput)
-                .withDatabaseName(dbName).withCatalogId(catalogId);
-        glueClient.createTable(createTableRequest);
-    }
-
-    @Override
-    public Table getTable(String dbName, String tableName) {
-        GetTableRequest getTableRequest = new GetTableRequest().withDatabaseName(dbName).withName(tableName)
-                .withCatalogId(catalogId);
-        GetTableResult result = glueClient.getTable(getTableRequest);
-        return result.getTable();
-    }
-
-    @Override
-    public List<Table> getTables(String dbname, String tablePattern) {
-        List<Table> ret = new ArrayList<>();
-        String nextToken = null;
-        do {
-            GetTablesRequest getTablesRequest = new GetTablesRequest().withDatabaseName(dbname)
-                    .withExpression(tablePattern).withNextToken(nextToken).withCatalogId(catalogId);
-            GetTablesResult result = glueClient.getTables(getTablesRequest);
-            ret.addAll(result.getTableList());
-            nextToken = result.getNextToken();
-        } while (nextToken != null);
-        return ret;
-    }
-
-    @Override
-    public void updateTable(String dbName, TableInput tableInput) {
-        UpdateTableRequest updateTableRequest = new UpdateTableRequest().withDatabaseName(dbName)
-                .withTableInput(tableInput).withCatalogId(catalogId);
-        glueClient.updateTable(updateTableRequest);
-    }
-
-    @Override
-    public void updateTable(String dbName, TableInput tableInput, EnvironmentContext environmentContext) {
-        UpdateTableRequest updateTableRequest = new UpdateTableRequest().withDatabaseName(dbName)
-                .withTableInput(tableInput).withCatalogId(catalogId).withSkipArchive(skipArchive(environmentContext));
-        glueClient.updateTable(updateTableRequest);
-    }
-
-    private boolean skipArchive(EnvironmentContext environmentContext) {
-        return environmentContext != null &&
-                environmentContext.isSetProperties() &&
-                StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(SKIP_AWS_GLUE_ARCHIVE));
-    }
-
-    @Override
-    public void deleteTable(String dbName, String tableName) {
-        DeleteTableRequest deleteTableRequest = new DeleteTableRequest().withDatabaseName(dbName).withName(tableName)
-                .withCatalogId(catalogId);
-        glueClient.deleteTable(deleteTableRequest);
-    }
-
-    // =========================== Partition ===========================
-
-    @Override
-    public Partition getPartition(String dbName, String tableName, List<String> partitionValues) {
-        GetPartitionRequest request = new GetPartitionRequest()
-                .withDatabaseName(dbName)
-                .withTableName(tableName)
-                .withPartitionValues(partitionValues)
-                .withCatalogId(catalogId);
-        return glueClient.getPartition(request).getPartition();
-    }
-
-    @Override
-    public List<Partition> getPartitionsByNames(String dbName, String tableName,
-            List<PartitionValueList> partitionsToGet) {
-
-        List<List<PartitionValueList>> batchedPartitionsToGet = Lists.partition(partitionsToGet,
-                BATCH_GET_PARTITIONS_MAX_REQUEST_SIZE);
-        List<Future<BatchGetPartitionResult>> batchGetPartitionFutures = Lists.newArrayList();
-
-        for (List<PartitionValueList> batch : batchedPartitionsToGet) {
-            final BatchGetPartitionRequest request = new BatchGetPartitionRequest()
-                    .withDatabaseName(dbName)
-                    .withTableName(tableName)
-                    .withPartitionsToGet(batch)
-                    .withCatalogId(catalogId);
-            batchGetPartitionFutures.add(this.executorService.submit(new Callable<BatchGetPartitionResult>() {
-                @Override
-                public BatchGetPartitionResult call() throws Exception {
-                    return glueClient.batchGetPartition(request);
-                }
-            }));
-        }
-
-        List<Partition> result = Lists.newArrayList();
-        try {
-            for (Future<BatchGetPartitionResult> future : batchGetPartitionFutures) {
-                result.addAll(future.get().getPartitions());
-            }
-        } catch (ExecutionException e) {
-            Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-            Throwables.propagate(e.getCause());
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        }
-        return result;
-    }
-
-    @Override
-    public List<Partition> getPartitions(String dbName, String tableName, String expression,
-            long max) throws TException {
-        if (max == 0) {
-            return Collections.emptyList();
-        }
-        if (max < 0 || max > GET_PARTITIONS_MAX_SIZE) {
-            return getPartitionsParallel(dbName, tableName, expression, max);
-        } else {
-            // We don't need to get too many partitions, so just do it serially.
-            return getCatalogPartitions(dbName, tableName, expression, max, null);
-        }
-    }
-
-    private List<Partition> getPartitionsParallel(
-            final String databaseName,
-            final String tableName,
-            final String expression,
-            final long max) throws TException {
-        // Prepare the segments
-        List<Segment> segments = Lists.newArrayList();
-        for (int i = 0; i < numPartitionSegments; i++) {
-            segments.add(new Segment()
-                    .withSegmentNumber(i)
-                    .withTotalSegments(numPartitionSegments));
-        }
-        // Submit Glue API calls in parallel using the thread pool.
-        // We could convert this into a parallelStream after upgrading to JDK 8 compiler base.
-        List<Future<List<Partition>>> futures = Lists.newArrayList();
-        for (final Segment segment : segments) {
-            futures.add(this.executorService.submit(new Callable<List<Partition>>() {
-                @Override
-                public List<Partition> call() throws Exception {
-                    return getCatalogPartitions(databaseName, tableName, expression, max, segment);
-                }
-            }));
-        }
-
-        // Get the results
-        List<Partition> partitions = Lists.newArrayList();
-        try {
-            for (Future<List<Partition>> future : futures) {
-                List<Partition> segmentPartitions = future.get();
-                if (partitions.size() + segmentPartitions.size() >= max && max > 0) {
-                    // Extract the required number of partitions from the segment and we're done.
-                    long remaining = max - partitions.size();
-                    partitions.addAll(segmentPartitions.subList(0, (int) remaining));
-                    break;
-                } else {
-                    partitions.addAll(segmentPartitions);
-                }
-            }
-        } catch (ExecutionException e) {
-            Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-            Throwables.propagate(e.getCause());
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        }
-        return partitions;
-    }
-
-
-    private List<Partition> getCatalogPartitions(String databaseName, String tableName, String expression,
-            long max, Segment segment) {
-        List<Partition> partitions = Lists.newArrayList();
-        String nextToken = null;
-        do {
-            GetPartitionsRequest request = new GetPartitionsRequest()
-                    .withDatabaseName(databaseName)
-                    .withTableName(tableName)
-                    .withExpression(expression)
-                    .withNextToken(nextToken)
-                    .withCatalogId(catalogId)
-                    .withSegment(segment);
-            GetPartitionsResult res = glueClient.getPartitions(request);
-            List<Partition> list = res.getPartitions();
-            if ((partitions.size() + list.size()) >= max && max > 0) {
-                long remaining = max - partitions.size();
-                partitions.addAll(list.subList(0, (int) remaining));
-                break;
-            }
-            partitions.addAll(list);
-            nextToken = res.getNextToken();
-        } while (nextToken != null);
-        return partitions;
-    }
-
-    @Override
-    public void updatePartition(String dbName, String tableName, List<String> partitionValues,
-            PartitionInput partitionInput) {
-        UpdatePartitionRequest updatePartitionRequest = new UpdatePartitionRequest().withDatabaseName(dbName)
-                .withTableName(tableName).withPartitionValueList(partitionValues)
-                .withPartitionInput(partitionInput).withCatalogId(catalogId);
-        glueClient.updatePartition(updatePartitionRequest);
-    }
-
-    @Override
-    public void deletePartition(String dbName, String tableName, List<String> partitionValues) {
-        DeletePartitionRequest request = new DeletePartitionRequest()
-                .withDatabaseName(dbName)
-                .withTableName(tableName)
-                .withPartitionValues(partitionValues)
-                .withCatalogId(catalogId);
-        glueClient.deletePartition(request);
-    }
-
-    @Override
-    public List<PartitionError> createPartitions(String dbName, String tableName,
-            List<PartitionInput> partitionInputs) {
-        BatchCreatePartitionRequest request =
-                new BatchCreatePartitionRequest().withDatabaseName(dbName)
-                        .withTableName(tableName).withCatalogId(catalogId)
-                        .withPartitionInputList(partitionInputs);
-        return glueClient.batchCreatePartition(request).getErrors();
-    }
-
-    // ====================== User Defined Function ======================
-
-    @Override
-    public void createUserDefinedFunction(String dbName, UserDefinedFunctionInput functionInput) {
-        CreateUserDefinedFunctionRequest createUserDefinedFunctionRequest = new CreateUserDefinedFunctionRequest()
-                .withDatabaseName(dbName).withFunctionInput(functionInput).withCatalogId(catalogId);
-        glueClient.createUserDefinedFunction(createUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public UserDefinedFunction getUserDefinedFunction(String dbName, String functionName) {
-        GetUserDefinedFunctionRequest getUserDefinedFunctionRequest = new GetUserDefinedFunctionRequest()
-                .withDatabaseName(dbName).withFunctionName(functionName).withCatalogId(catalogId);
-        return glueClient.getUserDefinedFunction(getUserDefinedFunctionRequest).getUserDefinedFunction();
-    }
-
-    @Override
-    public List<UserDefinedFunction> getUserDefinedFunctions(String dbName, String pattern) {
-        List<UserDefinedFunction> ret = Lists.newArrayList();
-        String nextToken = null;
-        do {
-            GetUserDefinedFunctionsRequest getUserDefinedFunctionsRequest = new GetUserDefinedFunctionsRequest()
-                    .withDatabaseName(dbName).withPattern(pattern).withNextToken(nextToken).withCatalogId(catalogId);
-            GetUserDefinedFunctionsResult result = glueClient.getUserDefinedFunctions(getUserDefinedFunctionsRequest);
-            nextToken = result.getNextToken();
-            ret.addAll(result.getUserDefinedFunctions());
-        } while (nextToken != null);
-        return ret;
-    }
-
-    @Override
-    public List<UserDefinedFunction> getUserDefinedFunctions(String pattern) {
-        List<UserDefinedFunction> ret = Lists.newArrayList();
-        String nextToken = null;
-        do {
-            GetUserDefinedFunctionsRequest getUserDefinedFunctionsRequest = new GetUserDefinedFunctionsRequest()
-                    .withPattern(pattern).withNextToken(nextToken).withCatalogId(catalogId);
-            GetUserDefinedFunctionsResult result = glueClient.getUserDefinedFunctions(getUserDefinedFunctionsRequest);
-            nextToken = result.getNextToken();
-            ret.addAll(result.getUserDefinedFunctions());
-        } while (nextToken != null);
-        return ret;
-    }
-
-    @Override
-    public void deleteUserDefinedFunction(String dbName, String functionName) {
-        DeleteUserDefinedFunctionRequest deleteUserDefinedFunctionRequest = new DeleteUserDefinedFunctionRequest()
-                .withDatabaseName(dbName).withFunctionName(functionName).withCatalogId(catalogId);
-        glueClient.deleteUserDefinedFunction(deleteUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public void updateUserDefinedFunction(String dbName, String functionName, UserDefinedFunctionInput functionInput) {
-        UpdateUserDefinedFunctionRequest updateUserDefinedFunctionRequest = new UpdateUserDefinedFunctionRequest()
-                .withDatabaseName(dbName).withFunctionName(functionName).withFunctionInput(functionInput)
-                .withCatalogId(catalogId);
-        glueClient.updateUserDefinedFunction(updateUserDefinedFunctionRequest);
-    }
-
-    @Override
-    public void deletePartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, String colName) {
-        DeleteColumnStatisticsForPartitionRequest request = new DeleteColumnStatisticsForPartitionRequest()
-                .withCatalogId(catalogId)
-                .withDatabaseName(dbName)
-                .withTableName(tableName)
-                .withPartitionValues(partitionValues)
-                .withColumnName(colName);
-        glueClient.deleteColumnStatisticsForPartition(request);
-    }
-
-    @Override
-    public void deleteTableColumnStatistics(String dbName, String tableName, String colName) {
-        DeleteColumnStatisticsForTableRequest request = new DeleteColumnStatisticsForTableRequest()
-                .withCatalogId(catalogId)
-                .withDatabaseName(dbName)
-                .withTableName(tableName)
-                .withColumnName(colName);
-        glueClient.deleteColumnStatisticsForTable(request);
-    }
-
-    @Override
-    public Map<String, List<ColumnStatistics>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partitionValues, List<String> columnNames) {
-        Map<String, List<ColumnStatistics>> partitionStatistics = new HashMap<>();
-        List<List<String>> pagedColNames = Lists.partition(columnNames, GET_COLUMNS_STAT_MAX_SIZE);
-        List<String> partValues;
-        for (String partName : partitionValues) {
-            partValues = PartitionNameParser.getPartitionValuesFromName(partName);
-            List<Future<GetColumnStatisticsForPartitionResult>> pagedResult = new ArrayList<>();
-            for (List<String> cols : pagedColNames) {
-                GetColumnStatisticsForPartitionRequest request = new GetColumnStatisticsForPartitionRequest()
-                        .withCatalogId(catalogId)
-                        .withDatabaseName(dbName)
-                        .withTableName(tableName)
-                        .withPartitionValues(partValues)
-                        .withColumnNames(cols);
-                pagedResult.add(GLUE_METASTORE_DELEGATE_THREAD_POOL.submit(new Callable<GetColumnStatisticsForPartitionResult>() {
-                    @Override
-                    public GetColumnStatisticsForPartitionResult call() throws Exception {
-                        return glueClient.getColumnStatisticsForPartition(request);
-                    }
-                }));
-            }
-
-            List<ColumnStatistics> result = new ArrayList<>();
-            for (Future<GetColumnStatisticsForPartitionResult> page : pagedResult) {
-                try {
-                    result.addAll(page.get().getColumnStatisticsList());
-                } catch (ExecutionException e) {
-                    Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-                    Throwables.propagate(e.getCause());
-                } catch (InterruptedException e) {
-                    Thread.currentThread().interrupt();
-                }
-            }
-            partitionStatistics.put(partName, result);
-        }
-        return partitionStatistics;
-    }
-
-    @Override
-    public List<ColumnStatistics> getTableColumnStatistics(String dbName, String tableName, List<String> colNames) {
-        List<List<String>> pagedColNames = Lists.partition(colNames, GET_COLUMNS_STAT_MAX_SIZE);
-        List<Future<GetColumnStatisticsForTableResult>> pagedResult = new ArrayList<>();
-
-        for (List<String> cols : pagedColNames) {
-            GetColumnStatisticsForTableRequest request = new GetColumnStatisticsForTableRequest()
-                    .withCatalogId(catalogId)
-                    .withDatabaseName(dbName)
-                    .withTableName(tableName)
-                    .withColumnNames(cols);
-            pagedResult.add(GLUE_METASTORE_DELEGATE_THREAD_POOL.submit(new Callable<GetColumnStatisticsForTableResult>() {
-                @Override
-                public GetColumnStatisticsForTableResult call() throws Exception {
-                    return glueClient.getColumnStatisticsForTable(request);
-                }
-            }));
-        }
-        List<ColumnStatistics> results = new ArrayList<>();
-
-        for (Future<GetColumnStatisticsForTableResult> page : pagedResult) {
-            try {
-                results.addAll(page.get().getColumnStatisticsList());
-            } catch (ExecutionException e) {
-                Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-                Throwables.propagate(e.getCause());
-            } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-            }
-        }
-        return results;
-    }
-
-    @Override
-    public List<ColumnStatisticsError> updatePartitionColumnStatistics(
-            String dbName,
-            String tableName,
-            List<String> partitionValues,
-            List<ColumnStatistics> columnStatistics) {
-
-        List<List<ColumnStatistics>> statisticsListPaged = Lists.partition(columnStatistics, UPDATE_COLUMNS_STAT_MAX_SIZE);
-        List<Future<UpdateColumnStatisticsForPartitionResult>> pagedResult = new ArrayList<>();
-        for (List<ColumnStatistics> statList : statisticsListPaged) {
-            UpdateColumnStatisticsForPartitionRequest request = new UpdateColumnStatisticsForPartitionRequest()
-                    .withCatalogId(catalogId)
-                    .withDatabaseName(dbName)
-                    .withTableName(tableName)
-                    .withPartitionValues(partitionValues)
-                    .withColumnStatisticsList(statList);
-            pagedResult.add(GLUE_METASTORE_DELEGATE_THREAD_POOL.submit(new Callable<UpdateColumnStatisticsForPartitionResult>() {
-                @Override
-                public UpdateColumnStatisticsForPartitionResult call() throws Exception {
-                    return glueClient.updateColumnStatisticsForPartition(request);
-                }
-            }));
-        }
-        // Wait for all calls to finish; the whole call fails if any future task fails.
-        List<ColumnStatisticsError> columnStatisticsErrors = new ArrayList<>();
-        try {
-            for (Future<UpdateColumnStatisticsForPartitionResult> page : pagedResult) {
-                Optional.ofNullable(page.get().getErrors()).ifPresent(error -> columnStatisticsErrors.addAll(error));
-            }
-        } catch (ExecutionException e) {
-            Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-            Throwables.propagate(e.getCause());
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        }
-        return columnStatisticsErrors;
-    }
-
-    @Override
-    public List<ColumnStatisticsError> updateTableColumnStatistics(
-            String dbName,
-            String tableName,
-            List<ColumnStatistics> columnStatistics) {
-
-        List<List<ColumnStatistics>> statisticsListPaged = Lists.partition(columnStatistics, UPDATE_COLUMNS_STAT_MAX_SIZE);
-        List<Future<UpdateColumnStatisticsForTableResult>> pagedResult = new ArrayList<>();
-        for (List<ColumnStatistics> statList : statisticsListPaged) {
-            UpdateColumnStatisticsForTableRequest request = new UpdateColumnStatisticsForTableRequest()
-                    .withCatalogId(catalogId)
-                    .withDatabaseName(dbName)
-                    .withTableName(tableName)
-                    .withColumnStatisticsList(statList);
-            pagedResult.add(GLUE_METASTORE_DELEGATE_THREAD_POOL.submit(new Callable<UpdateColumnStatisticsForTableResult>() {
-                @Override
-                public UpdateColumnStatisticsForTableResult call() throws Exception {
-                    return glueClient.updateColumnStatisticsForTable(request);
-                }
-            }));
-        }
-
-        // Wait for all calls to finish; the whole call fails if any future task fails.
-        List<ColumnStatisticsError> columnStatisticsErrors = new ArrayList<>();
-        try {
-            for (Future<UpdateColumnStatisticsForTableResult> page : pagedResult) {
-                Optional.ofNullable(page.get().getErrors()).ifPresent(error -> columnStatisticsErrors.addAll(error));
-            }
-        } catch (ExecutionException e) {
-            Throwables.propagateIfInstanceOf(e.getCause(), AmazonServiceException.class);
-            Throwables.propagate(e.getCause());
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        }
-        return columnStatisticsErrors;
-    }
-}
\ No newline at end of file
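
Context for the partition scan above: getPartitionsParallel() fans one logical getPartitions()
call out over non-overlapping Glue segments. A hedged configuration sketch (the key is the
NUM_PARTITION_SEGMENTS_CONF constant from the deleted class; Glue caps TotalSegments at 10):

    // Scan with 8 parallel segments instead of the default 5; values above
    // MAX_NUM_PARTITION_SEGMENTS fail the constructor's checkArgument.
    Configuration conf = new Configuration();
    conf.setInt("aws.glue.partition.num.segments", 8);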
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultExecutorServiceFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultExecutorServiceFactory.java
deleted file mode 100644
index 326587f161..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultExecutorServiceFactory.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-public class DefaultExecutorServiceFactory implements ExecutorServiceFactory {
-    private static final int NUM_EXECUTOR_THREADS = 5;
-
-    private static final ExecutorService GLUE_METASTORE_DELEGATE_THREAD_POOL = Executors.newFixedThreadPool(
-            NUM_EXECUTOR_THREADS, new ThreadFactoryBuilder()
-                    .setNameFormat(GlueMetastoreClientDelegate.GLUE_METASTORE_DELEGATE_THREADPOOL_NAME_FORMAT)
-                    .setDaemon(true).build()
-    );
-
-    @Override
-    public ExecutorService getExecutorService(Configuration conf) {
-        return GLUE_METASTORE_DELEGATE_THREAD_POOL;
-    }
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/ExecutorServiceFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/ExecutorServiceFactory.java
deleted file mode 100644
index a9b53f55ad..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/ExecutorServiceFactory.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-
-import java.util.concurrent.ExecutorService;
-
-/*
- * Interface for creating an ExecutorService
- */
-public interface ExecutorServiceFactory {
-    public ExecutorService getExecutorService(Configuration conf);
-}
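
Context for the interface above: a hedged sketch (class name and pool-size key are
hypothetical) of a custom factory that DefaultAWSGlueMetastore would load via its
"hive.metastore.executorservice.factory.class" setting:

    public class BoundedExecutorServiceFactory implements ExecutorServiceFactory {
        @Override
        public ExecutorService getExecutorService(Configuration conf) {
            // Size the delegate pool from configuration; default to 8 threads.
            return Executors.newFixedThreadPool(conf.getInt("my.glue.pool.size", 8));
        }
    }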
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueClientFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueClientFactory.java
deleted file mode 100644
index 409d8863c3..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueClientFactory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.services.glue.AWSGlue;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-
-/**
- * Interface for creating an AWS Glue client.
- */
-public interface GlueClientFactory {
-
-    AWSGlue newClient() throws MetaException;
-
-}
diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueMetastoreClientDelegate.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueMetastoreClientDelegate.java
deleted file mode 100644
index 8dcce126c8..0000000000
--- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/GlueMetastoreClientDelegate.java
+++ /dev/null
@@ -1,1843 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Copied from
-// https://github.com/awslabs/aws-glue-data-catalog-client-for-apache-hive-metastore/blob/branch-3.4.0/
-//
-
-package com.amazonaws.glue.catalog.metastore;
-
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.glue.catalog.converters.CatalogToHiveConverter;
-import com.amazonaws.glue.catalog.converters.CatalogToHiveConverterFactory;
-import com.amazonaws.glue.catalog.converters.GlueInputConverter;
-import com.amazonaws.glue.catalog.converters.HiveToCatalogConverter;
-import com.amazonaws.glue.catalog.converters.PartitionNameParser;
-import static com.amazonaws.glue.catalog.util.AWSGlueConfig.AWS_GLUE_DISABLE_UDF;
-import com.amazonaws.glue.catalog.util.BatchCreatePartitionsHelper;
-import com.amazonaws.glue.catalog.util.ExpressionHelper;
-import com.amazonaws.glue.catalog.util.MetastoreClientUtils;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.deepCopyMap;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.isExternalTable;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.makeDirs;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.validateGlueTable;
-import static com.amazonaws.glue.catalog.util.MetastoreClientUtils.validateTableObject;
-import com.amazonaws.glue.catalog.util.PartitionKey;
-import com.amazonaws.services.glue.model.Column;
-import com.amazonaws.services.glue.model.ColumnStatistics;
-import com.amazonaws.services.glue.model.ColumnStatisticsError;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.DatabaseInput;
-import com.amazonaws.services.glue.model.EntityNotFoundException;
-import com.amazonaws.services.glue.model.Partition;
-import com.amazonaws.services.glue.model.PartitionInput;
-import com.amazonaws.services.glue.model.PartitionValueList;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.glue.model.TableInput;
-import com.amazonaws.services.glue.model.UserDefinedFunction;
-import com.amazonaws.services.glue.model.UserDefinedFunctionInput;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidTxnList;
-import static org.apache.hadoop.hive.metastore.HiveMetaStore.PUBLIC;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
-import static org.apache.hadoop.hive.metastore.TableType.EXTERNAL_TABLE;
-import static org.apache.hadoop.hive.metastore.TableType.MANAGED_TABLE;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-/***
- * Delegate class providing the functionality common to the
- * Spark-Hive, Hive, and Presto metastore clients.
- */
-public class GlueMetastoreClientDelegate {
-
-  private static final Logger logger = Logger.getLogger(GlueMetastoreClientDelegate.class);
-
-  private static final List<Role> implicitRoles = Lists.newArrayList(new Role(PUBLIC, 0, PUBLIC));
-  public static final int MILLISECOND_TO_SECOND_FACTOR = 1000;
-  public static final Long NO_MAX = -1L;
-  public static final String MATCH_ALL = ".*";
-  private static final int BATCH_CREATE_PARTITIONS_MAX_REQUEST_SIZE = 100;
-
-  private static final int NUM_EXECUTOR_THREADS = 5;
-  static final String GLUE_METASTORE_DELEGATE_THREADPOOL_NAME_FORMAT = "glue-metastore-delegate-%d";
-  private static final ExecutorService GLUE_METASTORE_DELEGATE_THREAD_POOL = Executors.newFixedThreadPool(
-          NUM_EXECUTOR_THREADS,
-          new ThreadFactoryBuilder()
-                  .setNameFormat(GLUE_METASTORE_DELEGATE_THREADPOOL_NAME_FORMAT)
-                  .setDaemon(true).build()
-  );
-
-  private final AWSGlueMetastore glueMetastore;
-  private final Configuration conf;
-  private final Warehouse wh;
-  // private final AwsGlueHiveShims hiveShims = ShimsLoader.getHiveShims();
-  private final CatalogToHiveConverter catalogToHiveConverter;
-  private final String catalogId;
-
-  public static final String CATALOG_ID_CONF = "hive.metastore.glue.catalogid";
-  public static final String NUM_PARTITION_SEGMENTS_CONF = "aws.glue.partition.num.segments";
-
-  public GlueMetastoreClientDelegate(Configuration conf, AWSGlueMetastore glueMetastore,
-          Warehouse wh) throws MetaException {
-    checkNotNull(conf, "Hive Config cannot be null");
-    checkNotNull(glueMetastore, "glueMetastore cannot be null");
-    checkNotNull(wh, "Warehouse cannot be null");
-
-    catalogToHiveConverter = CatalogToHiveConverterFactory.getCatalogToHiveConverter();
-    this.conf = conf;
-    this.glueMetastore = glueMetastore;
-    this.wh = wh;
-    // TODO - Maybe validate that catalogId conforms to an AWS account ID too.
-    catalogId = MetastoreClientUtils.getCatalogId(conf);
-  }
-
-  // ======================= Database =======================
-
-  public void createDatabase(org.apache.hadoop.hive.metastore.api.Database database) throws TException {
-    checkNotNull(database, "database cannot be null");
-
-    if (StringUtils.isEmpty(database.getLocationUri())) {
-      database.setLocationUri(wh.getDefaultDatabasePath(database.getName()).toString());
-    } else {
-      database.setLocationUri(wh.getDnsPath(new Path(database.getLocationUri())).toString());
-    }
-    Path dbPath = new Path(database.getLocationUri());
-    boolean madeDir = makeDirs(wh, dbPath);
-
-    try {
-      DatabaseInput catalogDatabase = GlueInputConverter.convertToDatabaseInput(database);
-      glueMetastore.createDatabase(catalogDatabase);
-    } catch (AmazonServiceException e) {
-      if (madeDir) {
-        // hiveShims.deleteDir(wh, dbPath, true, false);
-      }
-      throw catalogToHiveConverter.wrapInHiveException(e);
-    } catch (Exception e) {
-      String msg = "Unable to create database: ";
-      logger.error(msg, e);
-      throw new MetaException(msg + e);
... 8190 lines suppressed ...
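
The delegate shown above declares a bounded batch size (BATCH_CREATE_PARTITIONS_MAX_REQUEST_SIZE = 100) and a fixed-size daemon thread pool with named threads; the suppressed body presumably uses them to fan batched partition operations out in parallel. A self-contained sketch of that chunk-and-fan-out pattern (class and method names invented for illustration, not taken from the deleted sources):

    import com.google.common.collect.Lists;
    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.function.Consumer;

    // Illustration only; not code from the deleted class. Splits a large
    // work list into bounded batches and runs them on a named daemon pool,
    // mirroring GLUE_METASTORE_DELEGATE_THREAD_POOL and
    // BATCH_CREATE_PARTITIONS_MAX_REQUEST_SIZE above.
    public final class BatchFanOutExample {
        private static final int MAX_BATCH_SIZE = 100;
        private static final ExecutorService POOL = Executors.newFixedThreadPool(
                5,
                new ThreadFactoryBuilder()
                        .setNameFormat("glue-metastore-delegate-%d")
                        .setDaemon(true) // daemon threads do not block JVM shutdown
                        .build());

        public static <T> void runInBatches(List<T> items, Consumer<List<T>> batchFn) throws Exception {
            List<Future<?>> futures = new ArrayList<>();
            for (List<T> batch : Lists.partition(items, MAX_BATCH_SIZE)) {
                futures.add(POOL.submit(() -> batchFn.accept(batch)));
            }
            for (Future<?> f : futures) {
                f.get(); // surface the first batch failure, if any
            }
        }
    }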


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@doris.apache.org
For additional commands, e-mail: commits-help@doris.apache.org