Posted to commits@hive.apache.org by vi...@apache.org on 2018/07/19 19:55:31 UTC

[32/51] [partial] hive git commit: HIVE-20188 : Split server-specific code outside of standalone metastore-common (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
deleted file mode 100644
index 15b1aa1..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ /dev/null
@@ -1,1650 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hive.metastore.StatObjectConverter;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.utils.StringUtils;
-import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator;
-import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
-
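-/**
- * In-memory cache of metastore objects used by CachedStore. It caches Catalog, Database and
- * Table objects (along with each table's partitions and column statistics), and deduplicates
- * StorageDescriptors in a shared, reference-counted map keyed by an MD5 hash of the descriptor.
- * A global fair read/write lock guards the top-level maps; each cached table carries its own lock.
- */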
-public class SharedCache {
-  private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true);
-  private boolean isCatalogCachePrewarmed = false;
-  private Map<String, Catalog> catalogCache = new TreeMap<>();
-  private HashSet<String> catalogsDeletedDuringPrewarm = new HashSet<>();
-  private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false);
-
-  // For caching Database objects. Key is database name
-  private Map<String, Database> databaseCache = new TreeMap<>();
-  private boolean isDatabaseCachePrewarmed = false;
-  private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<>();
-  private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false);
-
-  // For caching TableWrapper objects. Key is aggregate of database name and table name
-  private Map<String, TableWrapper> tableCache = new TreeMap<>();
-  private boolean isTableCachePrewarmed = false;
-  private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<>();
-  private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false);
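-  // For caching StorageDescriptor objects shared across tables and partitions.
-  // Key is an MD5 hash of the storage descriptor; the wrapper keeps a reference count
-  // that is updated through increSd/decrSd.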
-  private Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache = new HashMap<>();
-  private static MessageDigest md;
-  private static final Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName());
-  private AtomicLong cacheUpdateCount = new AtomicLong(0);
-  private static long maxCacheSizeInBytes = -1;
-  private static long currentCacheSizeInBytes = 0;
-  private static HashMap<Class<?>, ObjectEstimator> sizeEstimators = null;
-
-  enum StatsType {
-    ALL(0), ALLBUTDEFAULT(1);
-
-    private final int position;
-
-    StatsType(int position) {
-      this.position = position;
-    }
-
-    public int getPosition() {
-      return position;
-    }
-  }
-
-  static {
-    try {
-      md = MessageDigest.getInstance("MD5");
-    } catch (NoSuchAlgorithmException e) {
-      throw new RuntimeException("should not happen", e);
-    }
-  }
-
-
-  public void initialize(long maxSharedCacheSizeInBytes) {
-    maxCacheSizeInBytes = maxSharedCacheSizeInBytes;
-    // Create estimators
-    if ((maxCacheSizeInBytes > 0) && (sizeEstimators == null)) {
-      sizeEstimators = IncrementalObjectSizeEstimator.createEstimators(SharedCache.class);
-    }
-  }
-
-  private static ObjectEstimator getMemorySizeEstimator(Class<?> clazz) {
-    ObjectEstimator estimator = sizeEstimators.get(clazz);
-    if (estimator == null) {
-      IncrementalObjectSizeEstimator.createEstimators(clazz, sizeEstimators);
-      estimator = sizeEstimators.get(clazz);
-    }
-    return estimator;
-  }
-
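-  /**
-   * Wrapper around a cached Table. The table's StorageDescriptor is held in the shared,
-   * reference-counted SD cache and referenced here by its hash; the SD's location and parameters
-   * are kept inline. The wrapper also caches the table's partitions and column statistics
-   * (table-level, partition-level and aggregate), all guarded by a per-table read/write lock.
-   */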
-  static class TableWrapper {
-    Table t;
-    String location;
-    Map<String, String> parameters;
-    byte[] sdHash;
-    ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true);
-    // For caching column stats for an unpartitioned table
-    // Key is column name and the value is the col stat object
-    private Map<String, ColumnStatisticsObj> tableColStatsCache =
-        new ConcurrentHashMap<String, ColumnStatisticsObj>();
-    private AtomicBoolean isTableColStatsCacheDirty = new AtomicBoolean(false);
-    // For caching partition objects
-    // Key is partition values and the value is a wrapper around the partition object
-    private Map<String, PartitionWrapper> partitionCache =
-        new ConcurrentHashMap<String, PartitionWrapper>();
-    private AtomicBoolean isPartitionCacheDirty = new AtomicBoolean(false);
-    // For caching column stats for a partitioned table
-    // Key is an aggregate of partition values and column name; the value is the col stat object
-    private Map<String, ColumnStatisticsObj> partitionColStatsCache =
-        new ConcurrentHashMap<String, ColumnStatisticsObj>();
-    private AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false);
-    // For caching aggregate column stats for all and all minus default partition
-    // Key is column name and the value is a list of 2 col stat objects
-    // (all partitions and all but default)
-    private Map<String, List<ColumnStatisticsObj>> aggrColStatsCache =
-        new ConcurrentHashMap<String, List<ColumnStatisticsObj>>();
-    private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false);
-
-    TableWrapper(Table t, byte[] sdHash, String location, Map<String, String> parameters) {
-      this.t = t;
-      this.sdHash = sdHash;
-      this.location = location;
-      this.parameters = parameters;
-    }
-
-    public Table getTable() {
-      return t;
-    }
-
-    public void setTable(Table t) {
-      this.t = t;
-    }
-
-    public byte[] getSdHash() {
-      return sdHash;
-    }
-
-    public void setSdHash(byte[] sdHash) {
-      this.sdHash = sdHash;
-    }
-
-    public String getLocation() {
-      return location;
-    }
-
-    public void setLocation(String location) {
-      this.location = location;
-    }
-
-    public Map<String, String> getParameters() {
-      return parameters;
-    }
-
-    public void setParameters(Map<String, String> parameters) {
-      this.parameters = parameters;
-    }
-
-    boolean sameDatabase(String catName, String dbName) {
-      return catName.equals(t.getCatName()) && dbName.equals(t.getDbName());
-    }
-
-    void cachePartition(Partition part, SharedCache sharedCache) {
-      try {
-        tableLock.writeLock().lock();
-        PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache);
-        partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), wrapper);
-        isPartitionCacheDirty.set(true);
-        // Invalidate cached aggregate stats
-        if (!aggrColStatsCache.isEmpty()) {
-          aggrColStatsCache.clear();
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    boolean cachePartitions(List<Partition> parts, SharedCache sharedCache) {
-      try {
-        tableLock.writeLock().lock();
-        for (Partition part : parts) {
-          PartitionWrapper ptnWrapper = makePartitionWrapper(part, sharedCache);
-          if (maxCacheSizeInBytes > 0) {
-            ObjectEstimator ptnWrapperSizeEstimator =
-                getMemorySizeEstimator(PartitionWrapper.class);
-            long estimatedMemUsage = ptnWrapperSizeEstimator.estimate(ptnWrapper, sizeEstimators);
-            LOG.trace("Memory needed to cache Partition: {} is {} bytes", part, estimatedMemUsage);
-            if (isCacheMemoryFull(estimatedMemUsage)) {
-              LOG.debug(
-                  "Cannot cache Partition: {}. Memory needed is {} bytes, whereas the memory remaining is: {} bytes.",
-                  part, estimatedMemUsage, (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes));
-              return false;
-            } else {
-              currentCacheSizeInBytes += estimatedMemUsage;
-            }
-            LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes);
-          }
-          partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), ptnWrapper);
-          isPartitionCacheDirty.set(true);
-        }
-        // Invalidate cached aggregate stats
-        if (!aggrColStatsCache.isEmpty()) {
-          aggrColStatsCache.clear();
-        }
-        return true;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public Partition getPartition(List<String> partVals, SharedCache sharedCache) {
-      Partition part = null;
-      try {
-        tableLock.readLock().lock();
-        PartitionWrapper wrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals));
-        if (wrapper == null) {
-          return null;
-        }
-        part = CacheUtils.assemble(wrapper, sharedCache);
-      } finally {
-        tableLock.readLock().unlock();
-      }
-      return part;
-    }
-
-    public List<Partition> listPartitions(int max, SharedCache sharedCache) {
-      List<Partition> parts = new ArrayList<>();
-      int count = 0;
-      try {
-        tableLock.readLock().lock();
-        for (PartitionWrapper wrapper : partitionCache.values()) {
-          if (max == -1 || count < max) {
-            parts.add(CacheUtils.assemble(wrapper, sharedCache));
-            count++;
-          }
-        }
-      } finally {
-        tableLock.readLock().unlock();
-      }
-      return parts;
-    }
-
-    public boolean containsPartition(List<String> partVals) {
-      boolean containsPart = false;
-      try {
-        tableLock.readLock().lock();
-        containsPart = partitionCache.containsKey(CacheUtils.buildPartitionCacheKey(partVals));
-      } finally {
-        tableLock.readLock().unlock();
-      }
-      return containsPart;
-    }
-
-    public Partition removePartition(List<String> partVal, SharedCache sharedCache) {
-      Partition part = null;
-      try {
-        tableLock.writeLock().lock();
-        PartitionWrapper wrapper =
-            partitionCache.remove(CacheUtils.buildPartitionCacheKey(partVal));
-        isPartitionCacheDirty.set(true);
-        if (wrapper.getSdHash() != null) {
-          sharedCache.decrSd(wrapper.getSdHash());
-        }
-        part = CacheUtils.assemble(wrapper, sharedCache);
-        // Remove col stats
-        String partialKey = CacheUtils.buildPartitionCacheKey(partVal);
-        Iterator<Entry<String, ColumnStatisticsObj>> iterator =
-            partitionColStatsCache.entrySet().iterator();
-        while (iterator.hasNext()) {
-          Entry<String, ColumnStatisticsObj> entry = iterator.next();
-          String key = entry.getKey();
-          if (key.toLowerCase().startsWith(partialKey.toLowerCase())) {
-            iterator.remove();
-          }
-        }
-        // Invalidate cached aggregate stats
-        if (!aggrColStatsCache.isEmpty()) {
-          aggrColStatsCache.clear();
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-      return part;
-    }
-
-    public void removePartitions(List<List<String>> partVals, SharedCache sharedCache) {
-      try {
-        tableLock.writeLock().lock();
-        for (List<String> partVal : partVals) {
-          removePartition(partVal, sharedCache);
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void alterPartition(List<String> partVals, Partition newPart, SharedCache sharedCache) {
-      try {
-        tableLock.writeLock().lock();
-        removePartition(partVals, sharedCache);
-        cachePartition(newPart, sharedCache);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void alterPartitions(List<List<String>> partValsList, List<Partition> newParts,
-        SharedCache sharedCache) {
-      try {
-        tableLock.writeLock().lock();
-        for (int i = 0; i < partValsList.size(); i++) {
-          List<String> partVals = partValsList.get(i);
-          Partition newPart = newParts.get(i);
-          alterPartition(partVals, newPart, sharedCache);
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void refreshPartitions(List<Partition> partitions, SharedCache sharedCache) {
-      Map<String, PartitionWrapper> newPartitionCache = new HashMap<String, PartitionWrapper>();
-      try {
-        tableLock.writeLock().lock();
-        for (Partition part : partitions) {
-          if (isPartitionCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping partition cache update for table: " + getTable().getTableName()
-                + "; the partition list we have is dirty.");
-            return;
-          }
-          String key = CacheUtils.buildPartitionCacheKey(part.getValues());
-          PartitionWrapper wrapper = partitionCache.get(key);
-          if (wrapper != null) {
-            if (wrapper.getSdHash() != null) {
-              sharedCache.decrSd(wrapper.getSdHash());
-            }
-          }
-          wrapper = makePartitionWrapper(part, sharedCache);
-          newPartitionCache.put(key, wrapper);
-        }
-        partitionCache = newPartitionCache;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public boolean updateTableColStats(List<ColumnStatisticsObj> colStatsForTable) {
-      try {
-        tableLock.writeLock().lock();
-        for (ColumnStatisticsObj colStatObj : colStatsForTable) {
-          // Get old stats object if present
-          String key = colStatObj.getColName();
-          ColumnStatisticsObj oldStatsObj = tableColStatsCache.get(key);
-          if (oldStatsObj != null) {
-            // Update existing stat object's field
-            StatObjectConverter.setFieldsIntoOldStats(oldStatsObj, colStatObj);
-          } else {
-            // No stats exist for this key; add a new object to the cache
-            // TODO: get rid of deepCopy after making sure callers don't use references
-            if (maxCacheSizeInBytes > 0) {
-              ObjectEstimator tblColStatsSizeEstimator =
-                  getMemorySizeEstimator(ColumnStatisticsObj.class);
-              long estimatedMemUsage =
-                  tblColStatsSizeEstimator.estimate(colStatObj, sizeEstimators);
-              LOG.trace("Memory needed to cache Table Column Statistics Object: {} is {} bytes",
-                  colStatObj, estimatedMemUsage);
-              if (isCacheMemoryFull(estimatedMemUsage)) {
-                LOG.debug(
-                    "Cannot cache Table Column Statistics Object: {}. Memory needed is {} bytes, "
-                        + "whereas the memory remaining is: {} bytes.",
-                    colStatObj, estimatedMemUsage,
-                    (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes));
-                return false;
-              } else {
-                currentCacheSizeInBytes += estimatedMemUsage;
-              }
-              LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes);
-            }
-            tableColStatsCache.put(key, colStatObj.deepCopy());
-          }
-        }
-        isTableColStatsCacheDirty.set(true);
-        return true;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void refreshTableColStats(List<ColumnStatisticsObj> colStatsForTable) {
-      Map<String, ColumnStatisticsObj> newTableColStatsCache =
-          new HashMap<String, ColumnStatisticsObj>();
-      try {
-        tableLock.writeLock().lock();
-        for (ColumnStatisticsObj colStatObj : colStatsForTable) {
-          if (isTableColStatsCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping table col stats cache update for table: "
-                + getTable().getTableName() + "; the table col stats list we have is dirty.");
-            return;
-          }
-          String key = colStatObj.getColName();
-          // TODO: get rid of deepCopy after making sure callers don't use references
-          newTableColStatsCache.put(key, colStatObj.deepCopy());
-        }
-        tableColStatsCache = newTableColStatsCache;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public List<ColumnStatisticsObj> getCachedTableColStats(List<String> colNames) {
-      List<ColumnStatisticsObj> colStatObjs = new ArrayList<ColumnStatisticsObj>();
-      try {
-        tableLock.readLock().lock();
-        for (String colName : colNames) {
-          ColumnStatisticsObj colStatObj = tableColStatsCache.get(colName);
-          if (colStatObj != null) {
-            colStatObjs.add(colStatObj);
-          }
-        }
-      } finally {
-        tableLock.readLock().unlock();
-      }
-      return colStatObjs;
-    }
-
-    public void removeTableColStats(String colName) {
-      try {
-        tableLock.writeLock().lock();
-        tableColStatsCache.remove(colName);
-        isTableColStatsCacheDirty.set(true);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public ColumnStatisticsObj getPartitionColStats(List<String> partVal, String colName) {
-      try {
-        tableLock.readLock().lock();
-        return partitionColStatsCache
-            .get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName));
-      } finally {
-        tableLock.readLock().unlock();
-      }
-    }
-
-    public boolean updatePartitionColStats(List<String> partVal,
-        List<ColumnStatisticsObj> colStatsObjs) {
-      try {
-        tableLock.writeLock().lock();
-        for (ColumnStatisticsObj colStatObj : colStatsObjs) {
-          // Get old stats object if present
-          String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName());
-          ColumnStatisticsObj oldStatsObj = partitionColStatsCache.get(key);
-          if (oldStatsObj != null) {
-            // Update existing stat object's field
-            StatObjectConverter.setFieldsIntoOldStats(oldStatsObj, colStatObj);
-          } else {
-            // No stats exist for this key; add a new object to the cache
-            // TODO: get rid of deepCopy after making sure callers don't use references
-            if (maxCacheSizeInBytes > 0) {
-              ObjectEstimator ptnColStatsSizeEstimator =
-                  getMemorySizeEstimator(ColumnStatisticsObj.class);
-              long estimatedMemUsage =
-                  ptnColStatsSizeEstimator.estimate(colStatObj, sizeEstimators);
-              LOG.trace("Memory needed to cache Partition Column Statistics Object: {} is {} bytes",
-                  colStatObj, estimatedMemUsage);
-              if (isCacheMemoryFull(estimatedMemUsage)) {
-                LOG.debug(
-                    "Cannot cache Partition Column Statistics Object: {}. Memory needed is {} bytes, "
-                    + "whereas the memory remaining is: {} bytes.",
-                    colStatObj, estimatedMemUsage,
-                    (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes));
-                return false;
-              } else {
-                currentCacheSizeInBytes += estimatedMemUsage;
-              }
-              LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes);
-            }
-            partitionColStatsCache.put(key, colStatObj.deepCopy());
-          }
-        }
-        isPartitionColStatsCacheDirty.set(true);
-        // Invalidate cached aggregate stats
-        if (!aggrColStatsCache.isEmpty()) {
-          aggrColStatsCache.clear();
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-      return true;
-    }
-
-    public void removePartitionColStats(List<String> partVals, String colName) {
-      try {
-        tableLock.writeLock().lock();
-        partitionColStatsCache.remove(CacheUtils.buildPartitonColStatsCacheKey(partVals, colName));
-        isPartitionColStatsCacheDirty.set(true);
-        // Invalidate cached aggregate stats
-        if (!aggrColStatsCache.isEmpty()) {
-          aggrColStatsCache.clear();
-        }
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void refreshPartitionColStats(List<ColumnStatistics> partitionColStats) {
-      Map<String, ColumnStatisticsObj> newPartitionColStatsCache =
-          new HashMap<String, ColumnStatisticsObj>();
-      try {
-        tableLock.writeLock().lock();
-        String tableName = StringUtils.normalizeIdentifier(getTable().getTableName());
-        for (ColumnStatistics cs : partitionColStats) {
-          if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping partition column stats cache update for table: "
-                + getTable().getTableName() + "; the partition column stats list we have is dirty");
-            return;
-          }
-          List<String> partVal;
-          try {
-            partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null);
-            List<ColumnStatisticsObj> colStatsObjs = cs.getStatsObj();
-            for (ColumnStatisticsObj colStatObj : colStatsObjs) {
-              if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-                LOG.debug("Skipping partition column stats cache update for table: "
-                    + getTable().getTableName() + "; the partition column list we have is dirty");
-                return;
-              }
-              String key =
-                  CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName());
-              newPartitionColStatsCache.put(key, colStatObj.deepCopy());
-            }
-          } catch (MetaException e) {
-            LOG.debug("Unable to cache partition column stats for table: " + tableName, e);
-          }
-        }
-        partitionColStatsCache = newPartitionColStatsCache;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public List<ColumnStatisticsObj> getAggrPartitionColStats(List<String> colNames,
-        StatsType statsType) {
-      List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>();
-      try {
-        tableLock.readLock().lock();
-        for (String colName : colNames) {
-          List<ColumnStatisticsObj> colStatList = aggrColStatsCache.get(colName);
-          // If unable to find stats for a column, return null so we can build stats
-          if (colStatList == null) {
-            return null;
-          }
-          ColumnStatisticsObj colStatObj = colStatList.get(statsType.getPosition());
-          // If unable to find stats for this StatsType, return null so we can build stats
-          if (colStatObj == null) {
-            return null;
-          }
-          colStats.add(colStatObj);
-        }
-      } finally {
-        tableLock.readLock().unlock();
-      }
-      return colStats;
-    }
-
-    public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions,
-        AggrStats aggrStatsAllButDefaultPartition) {
-      try {
-        tableLock.writeLock().lock();
-        if (aggrStatsAllPartitions != null) {
-          for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) {
-            if (statObj != null) {
-              List<ColumnStatisticsObj> aggrStats = new ArrayList<ColumnStatisticsObj>();
-              aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy());
-              aggrColStatsCache.put(statObj.getColName(), aggrStats);
-            }
-          }
-        }
-        if (aggrStatsAllButDefaultPartition != null) {
-          for (ColumnStatisticsObj statObj : aggrStatsAllButDefaultPartition.getColStats()) {
-            if (statObj != null) {
-              List<ColumnStatisticsObj> aggrStats = aggrColStatsCache.get(statObj.getColName());
-              if (aggrStats == null) {
-                aggrStats = new ArrayList<ColumnStatisticsObj>();
-              }
-              aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy());
-            }
-          }
-        }
-        isAggrPartitionColStatsCacheDirty.set(true);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions,
-        AggrStats aggrStatsAllButDefaultPartition) {
-      Map<String, List<ColumnStatisticsObj>> newAggrColStatsCache =
-          new HashMap<String, List<ColumnStatisticsObj>>();
-      try {
-        tableLock.writeLock().lock();
-        if (aggrStatsAllPartitions != null) {
-          for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) {
-            if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-              LOG.debug("Skipping aggregate stats cache update for table: "
-                  + getTable().getTableName() + "; the aggregate stats list we have is dirty");
-              return;
-            }
-            if (statObj != null) {
-              List<ColumnStatisticsObj> aggrStats = new ArrayList<ColumnStatisticsObj>();
-              aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy());
-              newAggrColStatsCache.put(statObj.getColName(), aggrStats);
-            }
-          }
-        }
-        if (aggrStatsAllButDefaultPartition != null) {
-          for (ColumnStatisticsObj statObj : aggrStatsAllButDefaultPartition.getColStats()) {
-            if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-              LOG.debug("Skipping aggregate stats cache update for table: "
-                  + getTable().getTableName() + "; the aggregate stats list we have is dirty");
-              return;
-            }
-            if (statObj != null) {
-              List<ColumnStatisticsObj> aggrStats = newAggrColStatsCache.get(statObj.getColName());
-              if (aggrStats == null) {
-                aggrStats = new ArrayList<ColumnStatisticsObj>();
-              }
-              aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy());
-            }
-          }
-        }
-        aggrColStatsCache = newAggrColStatsCache;
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
-    private void updateTableObj(Table newTable, SharedCache sharedCache) {
-      byte[] sdHash = getSdHash();
-      // Remove old table object's sd hash
-      if (sdHash != null) {
-        sharedCache.decrSd(sdHash);
-      }
-      Table tblCopy = newTable.deepCopy();
-      if (tblCopy.getPartitionKeys() != null) {
-        for (FieldSchema fs : tblCopy.getPartitionKeys()) {
-          fs.setName(StringUtils.normalizeIdentifier(fs.getName()));
-        }
-      }
-      setTable(tblCopy);
-      if (tblCopy.getSd() != null) {
-        sdHash = MetaStoreUtils.hashStorageDescriptor(tblCopy.getSd(), md);
-        StorageDescriptor sd = tblCopy.getSd();
-        sharedCache.increSd(sd, sdHash);
-        tblCopy.setSd(null);
-        setSdHash(sdHash);
-        setLocation(sd.getLocation());
-        setParameters(sd.getParameters());
-      } else {
-        setSdHash(null);
-        setLocation(null);
-        setParameters(null);
-      }
-    }
-
-    private PartitionWrapper makePartitionWrapper(Partition part, SharedCache sharedCache) {
-      Partition partCopy = part.deepCopy();
-      PartitionWrapper wrapper;
-      if (part.getSd() != null) {
-        byte[] sdHash = MetaStoreUtils.hashStorageDescriptor(part.getSd(), md);
-        StorageDescriptor sd = part.getSd();
-        sharedCache.increSd(sd, sdHash);
-        partCopy.setSd(null);
-        wrapper = new PartitionWrapper(partCopy, sdHash, sd.getLocation(), sd.getParameters());
-      } else {
-        wrapper = new PartitionWrapper(partCopy, null, null, null);
-      }
-      return wrapper;
-    }
-  }
-
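-  /**
-   * Wrapper around a cached Partition. Like TableWrapper, the partition's StorageDescriptor is
-   * stored in the shared SD cache and referenced by its hash, with the SD's location and
-   * parameters kept inline.
-   */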
-  static class PartitionWrapper {
-    Partition p;
-    String location;
-    Map<String, String> parameters;
-    byte[] sdHash;
-
-    PartitionWrapper(Partition p, byte[] sdHash, String location, Map<String, String> parameters) {
-      this.p = p;
-      this.sdHash = sdHash;
-      this.location = location;
-      this.parameters = parameters;
-    }
-
-    public Partition getPartition() {
-      return p;
-    }
-
-    public byte[] getSdHash() {
-      return sdHash;
-    }
-
-    public String getLocation() {
-      return location;
-    }
-
-    public Map<String, String> getParameters() {
-      return parameters;
-    }
-  }
-
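-  /**
-   * Reference-counted wrapper around a StorageDescriptor that may be shared by many tables and
-   * partitions.
-   */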
-  static class StorageDescriptorWrapper {
-    StorageDescriptor sd;
-    int refCount = 0;
-
-    StorageDescriptorWrapper(StorageDescriptor sd, int refCount) {
-      this.sd = sd;
-      this.refCount = refCount;
-    }
-
-    public StorageDescriptor getSd() {
-      return sd;
-    }
-
-    public int getRefCount() {
-      return refCount;
-    }
-  }
-
-  public void populateCatalogsInCache(Collection<Catalog> catalogs) {
-    for (Catalog cat : catalogs) {
-      Catalog catCopy = cat.deepCopy();
-      // ObjectStore also stores the catalog name in lowercase
-      catCopy.setName(catCopy.getName().toLowerCase());
-      try {
-        cacheLock.writeLock().lock();
-        // Since we allow write operations on the cache while prewarm is happening:
-        // 1. Don't add catalogs that were deleted while we were preparing the list for prewarm
-        // 2. Skip overwriting an existing catalog object
-        // (which is present because it was added after prewarm started)
-        if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) {
-          continue;
-        }
-        catalogCache.putIfAbsent(catCopy.getName(), catCopy);
-        catalogsDeletedDuringPrewarm.clear();
-        isCatalogCachePrewarmed = true;
-      } finally {
-        cacheLock.writeLock().unlock();
-      }
-    }
-  }
-
-  public Catalog getCatalogFromCache(String name) {
-    Catalog cat = null;
-    try {
-      cacheLock.readLock().lock();
-      if (catalogCache.get(name) != null) {
-        cat = catalogCache.get(name).deepCopy();
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return cat;
-  }
-
-  public void addCatalogToCache(Catalog cat) {
-    try {
-      cacheLock.writeLock().lock();
-      Catalog catCopy = cat.deepCopy();
-      // ObjectStore also stores the catalog name in lowercase
-      catCopy.setName(catCopy.getName().toLowerCase());
-      catalogCache.put(cat.getName(), catCopy);
-      isCatalogCacheDirty.set(true);
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public void alterCatalogInCache(String catName, Catalog newCat) {
-    try {
-      cacheLock.writeLock().lock();
-      removeCatalogFromCache(catName);
-      addCatalogToCache(newCat.deepCopy());
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public void removeCatalogFromCache(String name) {
-    name = normalizeIdentifier(name);
-    try {
-      cacheLock.writeLock().lock();
-      // If the catalog cache is not yet prewarmed, add this to a set which the prewarm thread can check
-      // so that the prewarm thread does not add it back
-      if (!isCatalogCachePrewarmed) {
-        catalogsDeletedDuringPrewarm.add(name);
-      }
-      if (catalogCache.remove(name) != null) {
-        isCatalogCacheDirty.set(true);
-      }
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public List<String> listCachedCatalogs() {
-    try {
-      cacheLock.readLock().lock();
-      return new ArrayList<>(catalogCache.keySet());
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public boolean isCatalogCachePrewarmed() {
-    return isCatalogCachePrewarmed;
-  }
-
-  public Database getDatabaseFromCache(String catName, String name) {
-    Database db = null;
-    try {
-      cacheLock.readLock().lock();
-      String key = CacheUtils.buildDbKey(catName, name);
-      if (databaseCache.get(key) != null) {
-        db = databaseCache.get(key).deepCopy();
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return db;
-  }
-
-  public void populateDatabasesInCache(List<Database> databases) {
-    for (Database db : databases) {
-      Database dbCopy = db.deepCopy();
-      // ObjectStore also stores db name in lowercase
-      dbCopy.setName(dbCopy.getName().toLowerCase());
-      try {
-        cacheLock.writeLock().lock();
-        // Since we allow write operations on cache while prewarm is happening:
-        // 1. Don't add databases that were deleted while we were preparing list for prewarm
-        // 2. Skip overwriting an existing db object
-        // (which is present because it was added after prewarm started)
-        String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(),
-            dbCopy.getName().toLowerCase());
-        if (databasesDeletedDuringPrewarm.contains(key)) {
-          continue;
-        }
-        databaseCache.putIfAbsent(key, dbCopy);
-        databasesDeletedDuringPrewarm.clear();
-        isDatabaseCachePrewarmed = true;
-      } finally {
-        cacheLock.writeLock().unlock();
-      }
-    }
-  }
-
-  public boolean isDatabaseCachePrewarmed() {
-    return isDatabaseCachePrewarmed;
-  }
-
-  public void addDatabaseToCache(Database db) {
-    try {
-      cacheLock.writeLock().lock();
-      Database dbCopy = db.deepCopy();
-      // ObjectStore also stores db name in lowercase
-      dbCopy.setName(dbCopy.getName().toLowerCase());
-      dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase());
-      databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy);
-      isDatabaseCacheDirty.set(true);
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public void removeDatabaseFromCache(String catName, String dbName) {
-    try {
-      cacheLock.writeLock().lock();
-      // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check
-      // so that the prewarm thread does not add it back
-      String key = CacheUtils.buildDbKey(catName, dbName);
-      if (!isDatabaseCachePrewarmed) {
-        databasesDeletedDuringPrewarm.add(key);
-      }
-      if (databaseCache.remove(key) != null) {
-        isDatabaseCacheDirty.set(true);
-      }
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public List<String> listCachedDatabases(String catName) {
-    List<String> results = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (String pair : databaseCache.keySet()) {
-        String[] n = CacheUtils.splitDbName(pair);
-        if (catName.equals(n[0]))
-          results.add(n[1]);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return results;
-  }
-
-  public List<String> listCachedDatabases(String catName, String pattern) {
-    List<String> results = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (String pair : databaseCache.keySet()) {
-        String[] n = CacheUtils.splitDbName(pair);
-        if (catName.equals(n[0])) {
-          n[1] = StringUtils.normalizeIdentifier(n[1]);
-          if (CacheUtils.matches(n[1], pattern)) {
-            results.add(n[1]);
-          }
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return results;
-  }
-
-  /**
-   * Replaces the old db object with the new one. This will add the new database to the cache if
-   * it does not exist.
-   */
-  public void alterDatabaseInCache(String catName, String dbName, Database newDb) {
-    try {
-      cacheLock.writeLock().lock();
-      removeDatabaseFromCache(catName, dbName);
-      addDatabaseToCache(newDb.deepCopy());
-      isDatabaseCacheDirty.set(true);
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public void refreshDatabasesInCache(List<Database> databases) {
-    try {
-      cacheLock.writeLock().lock();
-      if (isDatabaseCacheDirty.compareAndSet(true, false)) {
-        LOG.debug("Skipping database cache update; the database list we have is dirty.");
-        return;
-      }
-      databaseCache.clear();
-      for (Database db : databases) {
-        addDatabaseToCache(db);
-      }
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public int getCachedDatabaseCount() {
-    try {
-      cacheLock.readLock().lock();
-      return databaseCache.size();
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public boolean populateTableInCache(Table table, ColumnStatistics tableColStats,
-      List<Partition> partitions, List<ColumnStatistics> partitionColStats,
-      AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
-    String catName = StringUtils.normalizeIdentifier(table.getCatName());
-    String dbName = StringUtils.normalizeIdentifier(table.getDbName());
-    String tableName = StringUtils.normalizeIdentifier(table.getTableName());
-    // Since we allow write operations on cache while prewarm is happening:
-    // 1. Don't add tables that were deleted while we were preparing list for prewarm
-    if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) {
-      return false;
-    }
-    TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table);
-    if (maxCacheSizeInBytes > 0) {
-      ObjectEstimator tblWrapperSizeEstimator = getMemorySizeEstimator(TableWrapper.class);
-      long estimatedMemUsage = tblWrapperSizeEstimator.estimate(tblWrapper, sizeEstimators);
-      LOG.debug("Memory needed to cache Database: {}'s Table: {}, is {} bytes", dbName, tableName,
-          estimatedMemUsage);
-      if (isCacheMemoryFull(estimatedMemUsage)) {
-        LOG.debug(
-            "Cannot cache Database: {}'s Table: {}. Memory needed is {} bytes, "
-                + "whereas the memory we have remaining is: {} bytes.",
-            dbName, tableName, estimatedMemUsage,
-            (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes));
-        return false;
-      } else {
-        currentCacheSizeInBytes += estimatedMemUsage;
-      }
-      LOG.debug("Current cache size: {} bytes", currentCacheSizeInBytes);
-    }
-    if (!table.isSetPartitionKeys() && (tableColStats != null)) {
-      if (!tblWrapper.updateTableColStats(tableColStats.getStatsObj())) {
-        return false;
-      }
-    } else {
-      if (partitions != null) {
-        // If the partitions were not added due to memory limit, return false
-        if (!tblWrapper.cachePartitions(partitions, this)) {
-          return false;
-        }
-      }
-      if (partitionColStats != null) {
-        for (ColumnStatistics cs : partitionColStats) {
-          List<String> partVal;
-          try {
-            partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null);
-            List<ColumnStatisticsObj> colStats = cs.getStatsObj();
-            if (!tblWrapper.updatePartitionColStats(partVal, colStats)) {
-              return false;
-            }
-          } catch (MetaException e) {
-            LOG.debug("Unable to cache partition column stats for table: " + tableName, e);
-          }
-        }
-      }
-      tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions,
-          aggrStatsAllButDefaultPartition);
-    }
-    try {
-      cacheLock.writeLock().lock();
-      // 2. Skip overwriting an existing table object
-      // (which is present because it was added after prewarm started)
-      tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper);
-      return true;
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
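-  // The cache is considered full once the estimated usage would exceed 80% of the configured
-  // maximum size.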
-  private static boolean isCacheMemoryFull(long estimatedMemUsage) {
-    return (0.8*maxCacheSizeInBytes) < (currentCacheSizeInBytes + estimatedMemUsage);
-  }
-
-  public void completeTableCachePrewarm() {
-    try {
-      cacheLock.writeLock().lock();
-      tablesDeletedDuringPrewarm.clear();
-      isTableCachePrewarmed = true;
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public Table getTableFromCache(String catName, String dbName, String tableName) {
-    Table t = null;
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper =
-          tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
-      if (tblWrapper != null) {
-        t = CacheUtils.assemble(tblWrapper, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return t;
-  }
-
-  public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) {
-    try {
-      cacheLock.writeLock().lock();
-      TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl);
-      tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper);
-      isTableCacheDirty.set(true);
-      return wrapper;
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  private TableWrapper createTableWrapper(String catName, String dbName, String tblName,
-      Table tbl) {
-    TableWrapper wrapper;
-    Table tblCopy = tbl.deepCopy();
-    tblCopy.setCatName(normalizeIdentifier(catName));
-    tblCopy.setDbName(normalizeIdentifier(dbName));
-    tblCopy.setTableName(normalizeIdentifier(tblName));
-    if (tblCopy.getPartitionKeys() != null) {
-      for (FieldSchema fs : tblCopy.getPartitionKeys()) {
-        fs.setName(normalizeIdentifier(fs.getName()));
-      }
-    }
-    if (tbl.getSd() != null) {
-      byte[] sdHash = MetaStoreUtils.hashStorageDescriptor(tbl.getSd(), md);
-      StorageDescriptor sd = tbl.getSd();
-      increSd(sd, sdHash);
-      tblCopy.setSd(null);
-      wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters());
-    } else {
-      wrapper = new TableWrapper(tblCopy, null, null, null);
-    }
-    return wrapper;
-  }
-
-  public void removeTableFromCache(String catName, String dbName, String tblName) {
-    try {
-      cacheLock.writeLock().lock();
-      // If table cache is not yet prewarmed, add this to a set which the prewarm thread can check
-      // so that the prewarm thread does not add it back
-      if (!isTableCachePrewarmed) {
-        tablesDeletedDuringPrewarm.add(CacheUtils.buildTableKey(catName, dbName, tblName));
-      }
-      TableWrapper tblWrapper =
-          tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName));
-      byte[] sdHash = tblWrapper.getSdHash();
-      if (sdHash != null) {
-        decrSd(sdHash);
-      }
-      isTableCacheDirty.set(true);
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) {
-    try {
-      cacheLock.writeLock().lock();
-      TableWrapper tblWrapper =
-          tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.updateTableObj(newTable, this);
-        String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName());
-        String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName());
-        tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper);
-        isTableCacheDirty.set(true);
-      }
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public List<Table> listCachedTables(String catName, String dbName) {
-    List<Table> tables = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.sameDatabase(catName, dbName)) {
-          tables.add(CacheUtils.assemble(wrapper, this));
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return tables;
-  }
-
-  public List<String> listCachedTableNames(String catName, String dbName) {
-    List<String> tableNames = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.sameDatabase(catName, dbName)) {
-          tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return tableNames;
-  }
-
-  public List<String> listCachedTableNames(String catName, String dbName, String pattern,
-      short maxTables) {
-    List<String> tableNames = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      int count = 0;
-      for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.sameDatabase(catName, dbName)
-            && CacheUtils.matches(wrapper.getTable().getTableName(), pattern)
-            && (maxTables == -1 || count < maxTables)) {
-          tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
-          count++;
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return tableNames;
-  }
-
-  public List<String> listCachedTableNames(String catName, String dbName, String pattern,
-      TableType tableType) {
-    List<String> tableNames = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.sameDatabase(catName, dbName)
-            && CacheUtils.matches(wrapper.getTable().getTableName(), pattern)
-            && wrapper.getTable().getTableType().equals(tableType.toString())) {
-          tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return tableNames;
-  }
-
-  public void refreshTablesInCache(String catName, String dbName, List<Table> tables) {
-    try {
-      cacheLock.writeLock().lock();
-      if (isTableCacheDirty.compareAndSet(true, false)) {
-        LOG.debug("Skipping table cache update; the table list we have is dirty.");
-        return;
-      }
-      Map<String, TableWrapper> newTableCache = new HashMap<>();
-      for (Table tbl : tables) {
-        String tblName = StringUtils.normalizeIdentifier(tbl.getTableName());
-        TableWrapper tblWrapper =
-            tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-        if (tblWrapper != null) {
-          tblWrapper.updateTableObj(tbl, this);
-        } else {
-          tblWrapper = createTableWrapper(catName, dbName, tblName, tbl);
-        }
-        newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
-      }
-      tableCache.clear();
-      tableCache = newTableCache;
-    } finally {
-      cacheLock.writeLock().unlock();
-    }
-  }
-
-  public List<ColumnStatisticsObj> getTableColStatsFromCache(String catName, String dbName,
-      String tblName, List<String> colNames) {
-    List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        colStatObjs = tblWrapper.getCachedTableColStats(colNames);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return colStatObjs;
-  }
-
-  public void removeTableColStatsFromCache(String catName, String dbName, String tblName,
-      String colName) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.removeTableColStats(colName);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void updateTableColStatsInCache(String catName, String dbName, String tableName,
-      List<ColumnStatisticsObj> colStatsForTable) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper =
-          tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
-      if (tblWrapper != null) {
-        tblWrapper.updateTableColStats(colStatsForTable);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void refreshTableColStatsInCache(String catName, String dbName, String tableName,
-      List<ColumnStatisticsObj> colStatsForTable) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper =
-          tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
-      if (tblWrapper != null) {
-        tblWrapper.refreshTableColStats(colStatsForTable);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public int getCachedTableCount() {
-    try {
-      cacheLock.readLock().lock();
-      return tableCache.size();
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
-      List<String> tableTypes) {
-    List<TableMeta> tableMetas = new ArrayList<>();
-    try {
-      cacheLock.readLock().lock();
-      for (String dbName : listCachedDatabases(catName)) {
-        if (CacheUtils.matches(dbName, dbNames)) {
-          for (Table table : listCachedTables(catName, dbName)) {
-            if (CacheUtils.matches(table.getTableName(), tableNames)) {
-              if (tableTypes == null || tableTypes.contains(table.getTableType())) {
-                TableMeta metaData =
-                    new TableMeta(dbName, table.getTableName(), table.getTableType());
-                metaData.setCatName(catName);
-                metaData.setComments(table.getParameters().get("comment"));
-                tableMetas.add(metaData);
-              }
-            }
-          }
-        }
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return tableMetas;
-  }
-
-  public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.cachePartition(part, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void addPartitionsToCache(String catName, String dbName, String tblName,
-      List<Partition> parts) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.cachePartitions(parts, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public Partition getPartitionFromCache(String catName, String dbName, String tblName,
-      List<String> partVals) {
-    Partition part = null;
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        part = tblWrapper.getPartition(partVals, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return part;
-  }
-
-  public boolean existPartitionFromCache(String catName, String dbName, String tblName,
-      List<String> partVals) {
-    boolean existsPart = false;
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        existsPart = tblWrapper.containsPartition(partVals);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return existsPart;
-  }
-
-  public Partition removePartitionFromCache(String catName, String dbName, String tblName,
-      List<String> partVals) {
-    Partition part = null;
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        part = tblWrapper.removePartition(partVals, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return part;
-  }
-
-  public void removePartitionsFromCache(String catName, String dbName, String tblName,
-      List<List<String>> partVals) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.removePartitions(partVals, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public List<Partition> listCachedPartitions(String catName, String dbName, String tblName,
-      int max) {
-    List<Partition> parts = new ArrayList<Partition>();
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        parts = tblWrapper.listPartitions(max, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return parts;
-  }
-
-  public void alterPartitionInCache(String catName, String dbName, String tblName,
-      List<String> partVals, Partition newPart) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.alterPartition(partVals, newPart, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void alterPartitionsInCache(String catName, String dbName, String tblName,
-      List<List<String>> partValsList, List<Partition> newParts) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.alterPartitions(partValsList, newParts, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void refreshPartitionsInCache(String catName, String dbName, String tblName,
-      List<Partition> partitions) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.refreshPartitions(partitions, this);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void removePartitionColStatsFromCache(String catName, String dbName, String tblName,
-      List<String> partVals, String colName) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.removePartitionColStats(partVals, colName);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void updatePartitionColStatsInCache(String catName, String dbName, String tableName,
-      List<String> partVals, List<ColumnStatisticsObj> colStatsObjs) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper =
-          tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
-      if (tblWrapper != null) {
-        tblWrapper.updatePartitionColStats(partVals, colStatsObjs);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public ColumnStatisticsObj getPartitionColStatsFromCache(String catName, String dbName,
-      String tblName, List<String> partVal, String colName) {
-    ColumnStatisticsObj colStatObj = null;
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        colStatObj = tblWrapper.getPartitionColStats(partVal, colName);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return colStatObj;
-  }
-
-  public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName,
-      List<ColumnStatistics> partitionColStats) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.refreshPartitionColStats(partitionColStats);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public List<ColumnStatisticsObj> getAggrStatsFromCache(String catName, String dbName,
-      String tblName, List<String> colNames, StatsType statsType) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        return tblWrapper.getAggrPartitionColStats(colNames, statsType);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-    return null;
-  }
-
-  public void addAggregateStatsToCache(String catName, String dbName, String tblName,
-      AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions,
-            aggrStatsAllButDefaultPartition);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public void refreshAggregateStatsInCache(String catName, String dbName, String tblName,
-      AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
-    try {
-      cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
-      if (tblWrapper != null) {
-        tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions,
-            aggrStatsAllButDefaultPartition);
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-  }
-
-  public synchronized void increSd(StorageDescriptor sd, byte[] sdHash) {
-    ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
-    if (sdCache.containsKey(byteArray)) {
-      sdCache.get(byteArray).refCount++;
-    } else {
-      StorageDescriptor sdToCache = sd.deepCopy();
-      sdToCache.setLocation(null);
-      sdToCache.setParameters(null);
-      sdCache.put(byteArray, new StorageDescriptorWrapper(sdToCache, 1));
-    }
-  }
-
-  public synchronized void decrSd(byte[] sdHash) {
-    ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
-    StorageDescriptorWrapper sdWrapper = sdCache.get(byteArray);
-    sdWrapper.refCount--;
-    if (sdWrapper.getRefCount() == 0) {
-      sdCache.remove(byteArray);
-    }
-  }
-
-  public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) {
-    StorageDescriptorWrapper sdWrapper = sdCache.get(new ByteArrayWrapper(sdHash));
-    return sdWrapper.getSd();
-  }
-
-  @VisibleForTesting
-  Map<String, Database> getDatabaseCache() {
-    return databaseCache;
-  }
-
-  @VisibleForTesting
-  Map<String, TableWrapper> getTableCache() {
-    return tableCache;
-  }
-
-  @VisibleForTesting
-  Map<ByteArrayWrapper, StorageDescriptorWrapper> getSdCache() {
-    return sdCache;
-  }
-
-  /**
-   * This resets the contents of the catalog cache so that we can re-fill it in another test.
-   */
-  void resetCatalogCache() {
-    isCatalogCachePrewarmed = false;
-    catalogCache.clear();
-    catalogsDeletedDuringPrewarm.clear();
-    isCatalogCacheDirty.set(false);
-  }
-
-  public long getUpdateCount() {
-    return cacheUpdateCount.get();
-  }
-
-  public void incrementUpdateCount() {
-    cacheUpdateCount.incrementAndGet();
-  }
-}
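
For orientation, a minimal sketch of how the storage-descriptor interning above is meant to be driven: callers hash the SD, register it with increSd, read the shared copy back with getSdFromCache, and release it with decrSd. The MD5 key derivation and the SharedCacheSdSketch wrapper below are assumptions for illustration only; the production key is computed by the metastore's own hashing code.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.cache.SharedCache;

public class SharedCacheSdSketch {
  // Illustrative key derivation; the cache only needs a stable byte[] per distinct SD.
  static byte[] sdKey(StorageDescriptor sd) throws Exception {
    return MessageDigest.getInstance("MD5")
        .digest(sd.toString().getBytes(StandardCharsets.UTF_8));
  }

  static void demo(SharedCache cache, StorageDescriptor sd) throws Exception {
    byte[] key = sdKey(sd);
    cache.increSd(sd, key);                        // first use stores a copy with location/parameters stripped
    StorageDescriptor shared = cache.getSdFromCache(key);
    System.out.println(shared.getCols());          // shared, de-duplicated descriptor
    cache.decrSd(key);                             // when the refCount reaches 0 the entry is removed
  }
}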

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
deleted file mode 100644
index be76d93..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.thrift.TException;
-
-public class CatalogBuilder {
-  private String name, description, location;
-
-  public CatalogBuilder setName(String name) {
-    this.name = name;
-    return this;
-  }
-
-  public CatalogBuilder setDescription(String description) {
-    this.description = description;
-    return this;
-  }
-
-  public CatalogBuilder setLocation(String location) {
-    this.location = location;
-    return this;
-  }
-
-  public Catalog build() throws MetaException {
-    if (name == null) throw new MetaException("You must name the catalog");
-    if (location == null) throw new MetaException("You must give the catalog a location");
-    Catalog catalog = new Catalog(name, location);
-    if (description != null) catalog.setDescription(description);
-    return catalog;
-  }
-
-  /**
-   * Build the catalog object and create it in the metastore.
-   * @param client metastore client
-   * @return new catalog object
-   * @throws TException thrown from the client
-   */
-  public Catalog create(IMetaStoreClient client) throws TException {
-    Catalog cat = build();
-    client.createCatalog(cat);
-    return cat;
-  }
-}
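
A usage sketch for the builder above (the catalog name and location are made up, and client stands for any open IMetaStoreClient):

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
import org.apache.thrift.TException;

public class CatalogBuilderSketch {
  static Catalog example(IMetaStoreClient client) throws TException {
    // name and location are required; description is optional
    return new CatalogBuilder()
        .setName("test_cat")
        .setLocation("/warehouse/test_cat")
        .setDescription("catalog used by the tests")
        .create(client);        // build() alone returns the object without creating it
  }
}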

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
deleted file mode 100644
index 2e32cbf..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Base builder for all types of constraints.  Table name and at least one column must be
- * provided; the database name defaults to the default database when not set.
- * @param <T> Type of builder extending this.
- */
-abstract class ConstraintBuilder<T> {
-  protected String catName, dbName, tableName, constraintName;
-  List<String> columns;
-  protected boolean enable, validate, rely;
-  private int nextSeq;
-  private T child;
-
-  protected ConstraintBuilder() {
-    nextSeq = 1;
-    enable = true;
-    validate = rely = false;
-    dbName = Warehouse.DEFAULT_DATABASE_NAME;
-    columns = new ArrayList<>();
-  }
-
-  protected void setChild(T child) {
-    this.child = child;
-  }
-
-  protected void checkBuildable(String defaultConstraintName, Configuration conf)
-      throws MetaException {
-    if (tableName == null || columns.isEmpty()) {
-      throw new MetaException("You must provide table name and columns");
-    }
-    if (constraintName == null) {
-      constraintName = tableName + "_" + defaultConstraintName;
-    }
-    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
-  }
-
-  protected int getNextSeq() {
-    return nextSeq++;
-  }
-
-  public T setCatName(String catName) {
-    this.catName = catName;
-    return child;
-  }
-
-  public T setDbName(String dbName) {
-    this.dbName = dbName;
-    return child;
-  }
-
-  public T setTableName(String tableName) {
-    this.tableName = tableName;
-    return child;
-  }
-
-  public T onTable(Table table) {
-    this.catName = table.getCatName();
-    this.dbName = table.getDbName();
-    this.tableName = table.getTableName();
-    return child;
-  }
-
-  public T addColumn(String columnName) {
-    this.columns.add(columnName);
-    return child;
-  }
-
-  public T setConstraintName(String constraintName) {
-    this.constraintName = constraintName;
-    return child;
-  }
-
-  public T setEnable(boolean enable) {
-    this.enable = enable;
-    return child;
-  }
-
-  public T setValidate(boolean validate) {
-    this.validate = validate;
-    return child;
-  }
-
-  public T setRely(boolean rely) {
-    this.rely = rely;
-    return child;
-  }
-}
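
The setChild indirection above is what keeps the shared setters typed to the concrete subclass. A hypothetical minimal subclass, assumed to live in the same builder package (the class name and its String result are illustrative only, not part of Hive):

package org.apache.hadoop.hive.metastore.client.builder;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.MetaException;

class ExampleKeyBuilder extends ConstraintBuilder<ExampleKeyBuilder> {
  ExampleKeyBuilder() {
    setChild(this);                         // setTableName(), addColumn(), ... now return ExampleKeyBuilder
  }

  String build(Configuration conf) throws MetaException {
    checkBuildable("example_key", conf);    // fills in constraintName and catName defaults
    return constraintName + " on " + tableName + " " + columns;
  }
}

// e.g. new ExampleKeyBuilder().setTableName("t1").addColumn("id").build(conf)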

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
deleted file mode 100644
index f3d2182..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A builder for {@link Database}.  The name of the new database is required.  Everything else
- * selects reasonable defaults.
- */
-public class DatabaseBuilder {
-  private String name, description, location, catalogName;
-  private Map<String, String> params = new HashMap<>();
-  private String ownerName;
-  private PrincipalType ownerType;
-
-  public DatabaseBuilder() {
-  }
-
-  public DatabaseBuilder setCatalogName(String catalogName) {
-    this.catalogName = catalogName;
-    return this;
-  }
-
-  public DatabaseBuilder setCatalogName(Catalog catalog) {
-    this.catalogName = catalog.getName();
-    return this;
-  }
-
-  public DatabaseBuilder setName(String name) {
-    this.name = name;
-    return this;
-  }
-
-  public DatabaseBuilder setDescription(String description) {
-    this.description = description;
-    return this;
-  }
-
-  public DatabaseBuilder setLocation(String location) {
-    this.location = location;
-    return this;
-  }
-
-  public DatabaseBuilder setParams(Map<String, String> params) {
-    this.params = params;
-    return this;
-  }
-
-  public DatabaseBuilder addParam(String key, String value) {
-    params.put(key, value);
-    return this;
-  }
-
-  public DatabaseBuilder setOwnerName(String ownerName) {
-    this.ownerName = ownerName;
-    return this;
-  }
-
-  public DatabaseBuilder setOwnerType(PrincipalType ownerType) {
-    this.ownerType = ownerType;
-    return this;
-  }
-
-  public Database build(Configuration conf) throws MetaException {
-    if (name == null) throw new MetaException("You must name the database");
-    if (catalogName == null) catalogName = MetaStoreUtils.getDefaultCatalog(conf);
-    Database db = new Database(name, description, location, params);
-    db.setCatalogName(catalogName);
-    try {
-      if (ownerName == null) ownerName = SecurityUtils.getUser();
-      db.setOwnerName(ownerName);
-      if (ownerType == null) ownerType = PrincipalType.USER;
-      db.setOwnerType(ownerType);
-      return db;
-    } catch (IOException e) {
-      throw MetaStoreUtils.newMetaException(e);
-    }
-  }
-
-  /**
-   * Build the database, create it in the metastore, and then return the db object.
-   * @param client metastore client
-   * @param conf configuration object
-   * @return new database object
-   * @throws TException comes from {@link #build(Configuration)} or
-   * {@link IMetaStoreClient#createDatabase(Database)}.
-   */
-  public Database create(IMetaStoreClient client, Configuration conf) throws TException {
-    Database db = build(conf);
-    client.createDatabase(db);
-    return db;
-  }
-}
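
A usage sketch (the database name and parameter are made up; client and conf stand for an open IMetaStoreClient and its Configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.thrift.TException;

public class DatabaseBuilderSketch {
  static Database example(IMetaStoreClient client, Configuration conf) throws TException {
    // only the name is required; catalog, owner and owner type fall back to defaults
    return new DatabaseBuilder()
        .setName("sales_db")
        .setDescription("example database")
        .addParam("created_by", "builder-sketch")
        .create(client, conf);
  }
}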

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
deleted file mode 100644
index c4c09dc..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Class for creating Thrift Function objects for tests and API usage.
- */
-public class FunctionBuilder {
-  private String catName, dbName;
-  private String funcName = null;
-  private String className = null;
-  private String owner = null;
-  private PrincipalType ownerType;
-  private int createTime;
-  private FunctionType funcType;
-  private List<ResourceUri> resourceUris;
-
-  public FunctionBuilder() {
-    // Set some reasonable defaults
-    ownerType = PrincipalType.USER;
-    createTime = (int) (System.currentTimeMillis() / 1000);
-    funcType = FunctionType.JAVA;
-    resourceUris = new ArrayList<>();
-    dbName = Warehouse.DEFAULT_DATABASE_NAME;
-  }
-
-  public FunctionBuilder setCatName(String catName) {
-    this.catName = catName;
-    return this;
-  }
-
-  public FunctionBuilder setDbName(String dbName) {
-    this.dbName = dbName;
-    return this;
-  }
-
-  public FunctionBuilder inDb(Database db) {
-    this.dbName = db.getName();
-    this.catName = db.getCatalogName();
-    return this;
-  }
-
-  public FunctionBuilder setName(String funcName) {
-    this.funcName = funcName;
-    return this;
-  }
-
-  public FunctionBuilder setClass(String className) {
-    this.className = className;
-    return this;
-  }
-
-  public FunctionBuilder setOwner(String owner) {
-    this.owner = owner;
-    return this;
-  }
-
-  public FunctionBuilder setOwnerType(PrincipalType ownerType) {
-    this.ownerType = ownerType;
-    return this;
-  }
-
-  public FunctionBuilder setCreateTime(int createTime) {
-    this.createTime = createTime;
-    return this;
-  }
-
-  public FunctionBuilder setFunctionType(FunctionType funcType) {
-    this.funcType = funcType;
-    return this;
-  }
-
-  public FunctionBuilder setResourceUris(List<ResourceUri> resourceUris) {
-    this.resourceUris = resourceUris;
-    return this;
-  }
-
-  public FunctionBuilder addResourceUri(ResourceUri resourceUri) {
-    this.resourceUris.add(resourceUri);
-    return this;
-  }
-
-  public Function build(Configuration conf) throws MetaException {
-    try {
-      // default the owner to the current user when none was set explicitly
-      if (owner == null) {
-        owner = SecurityUtils.getUser();
-      }
-    } catch (IOException e) {
-      throw MetaStoreUtils.newMetaException(e);
-    }
-    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
-    Function f = new Function(funcName, dbName, className, owner, ownerType, createTime, funcType,
-        resourceUris);
-    f.setCatName(catName);
-    return f;
-  }
-
-  /**
-   * Create the function object in the metastore and return it.
-   * @param client metastore client
-   * @param conf configuration
-   * @return new function object
-   * @throws TException if thrown by build or the client.
-   */
-  public Function create(IMetaStoreClient client, Configuration conf) throws TException {
-    Function f = build(conf);
-    client.createFunction(f);
-    return f;
-  }
-}
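
A usage sketch (the function and UDF class names are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
import org.apache.thrift.TException;

public class FunctionBuilderSketch {
  static Function example(IMetaStoreClient client, Configuration conf) throws TException {
    // dbName defaults to "default", funcType to JAVA, createTime to "now"
    return new FunctionBuilder()
        .setName("str_len")
        .setClass("org.example.udf.StrLen")
        .create(client, conf);
  }
}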

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
deleted file mode 100644
index 26cea19..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
-import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-
-/**
- * A builder for {@link GrantRevokePrivilegeRequest}.  The revoke-grant-option flag defaults to
- * false.  The request type and at least one privilege must be provided.
- */
-public class GrantRevokePrivilegeRequestBuilder {
-  private GrantRevokeType requestType;
-  private PrivilegeBag privileges;
-  private boolean revokeGrantOption;
-
-  public GrantRevokePrivilegeRequestBuilder() {
-    privileges = new PrivilegeBag();
-    revokeGrantOption = false;
-  }
-
-  public GrantRevokePrivilegeRequestBuilder setRequestType(GrantRevokeType requestType) {
-    this.requestType = requestType;
-    return this;
-  }
-
-  public GrantRevokePrivilegeRequestBuilder setRevokeGrantOption(boolean revokeGrantOption) {
-    this.revokeGrantOption = revokeGrantOption;
-    return this;
-  }
-
-  public GrantRevokePrivilegeRequestBuilder addPrivilege(HiveObjectPrivilege privilege) {
-    privileges.addToPrivileges(privilege);
-    return this;
-  }
-
-  public GrantRevokePrivilegeRequest build() throws MetaException {
-    if (requestType == null || privileges.getPrivilegesSize() == 0) {
-      throw new MetaException("The request type and at least one privilege must be provided.");
-    }
-    GrantRevokePrivilegeRequest rqst = new GrantRevokePrivilegeRequest(requestType, privileges);
-    if (revokeGrantOption) rqst.setRevokeGrantOption(revokeGrantOption);
-    return rqst;
-  }
-}
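
A usage sketch; the HiveObjectPrivilege passed in is assumed to come from the HiveObjectPrivilegeBuilder further down in this patch:

import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.client.builder.GrantRevokePrivilegeRequestBuilder;

public class GrantRequestSketch {
  static GrantRevokePrivilegeRequest example(HiveObjectPrivilege priv) throws MetaException {
    // the request type and at least one privilege are mandatory
    return new GrantRevokePrivilegeRequestBuilder()
        .setRequestType(GrantRevokeType.GRANT)
        .addPrivilege(priv)
        .build();
  }
}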

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
deleted file mode 100644
index ed32f1c..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-
-/**
- * Builder for {@link HiveObjectPrivilege}.  All values except the authorizer must be set.
- */
-public class HiveObjectPrivilegeBuilder {
-  private HiveObjectRef hiveObjectRef;
-  private String principleName;
-  private PrincipalType principalType;
-  private PrivilegeGrantInfo grantInfo;
-  private String authorizer;
-
-  public HiveObjectPrivilegeBuilder setHiveObjectRef(HiveObjectRef hiveObjectRef) {
-    this.hiveObjectRef = hiveObjectRef;
-    return this;
-  }
-
-  public HiveObjectPrivilegeBuilder setPrincipleName(String principleName) {
-    this.principleName = principleName;
-    return this;
-  }
-
-  public HiveObjectPrivilegeBuilder setPrincipalType(PrincipalType principalType) {
-    this.principalType = principalType;
-    return this;
-  }
-
-  public HiveObjectPrivilegeBuilder setGrantInfo(PrivilegeGrantInfo grantInfo) {
-    this.grantInfo = grantInfo;
-    return this;
-  }
-
-  public HiveObjectPrivilegeBuilder setAuthorizer(String authorizer) {
-    this.authorizer = authorizer;
-    return this;
-  }
-
-  public HiveObjectPrivilege build() throws MetaException {
-    if (hiveObjectRef == null || principleName == null || principalType == null ||
-        grantInfo == null) {
-      throw new MetaException("hive object reference, principle name and type, and grant info " +
-          "must all be provided");
-    }
-    return new HiveObjectPrivilege(hiveObjectRef, principleName, principalType, grantInfo, authorizer);
-  }
-}
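
A usage sketch; the object reference and grant info are assumed to be built elsewhere, and "some_user"/"hive" are placeholder values:

import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.client.builder.HiveObjectPrivilegeBuilder;

public class ObjectPrivilegeSketch {
  static HiveObjectPrivilege example(HiveObjectRef ref, PrivilegeGrantInfo grant) throws MetaException {
    return new HiveObjectPrivilegeBuilder()
        .setHiveObjectRef(ref)
        .setPrincipleName("some_user")        // spelling follows the builder's own API
        .setPrincipalType(PrincipalType.USER)
        .setGrantInfo(grant)
        .setAuthorizer("hive")                // not validated by build()
        .build();
  }
}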

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
deleted file mode 100644
index 666b8ab..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.client.builder;
-
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import java.util.Collections;
-import java.util.List;
-
-/**
- * A builder for {@link HiveObjectRef}.  Unlike most builders (which allow a gradual building up
- * of the values), this one provides methods that take the object to be referenced and then
- * build the appropriate reference.  It is intended primarily for use with
- * {@link HiveObjectPrivilegeBuilder}.
- */
-public class HiveObjectRefBuilder {
-  private HiveObjectType objectType;
-  private String dbName, objectName, columnName;
-  private List<String> partValues;
-
-  public HiveObjectRef buildGlobalReference() {
-    return new HiveObjectRef(HiveObjectType.GLOBAL, null, null, Collections.emptyList(), null);
-  }
-
-  public HiveObjectRef buildDatabaseReference(Database db) {
-    return new
-        HiveObjectRef(HiveObjectType.DATABASE, db.getName(), null, Collections.emptyList(), null);
-  }
-
-  public HiveObjectRef buildTableReference(Table table) {
-    return new HiveObjectRef(HiveObjectType.TABLE, table.getDbName(), table.getTableName(),
-        Collections.emptyList(), null);
-  }
-
-  public HiveObjectRef buildPartitionReference(Partition part) {
-    return new HiveObjectRef(HiveObjectType.PARTITION, part.getDbName(), part.getTableName(),
-        part.getValues(), null);
-  }
-
-  public HiveObjectRef buildColumnReference(Table table, String columnName) {
-    return new HiveObjectRef(HiveObjectType.COLUMN, table.getDbName(), table.getTableName(),
-        Collections.emptyList(), columnName);
-  }
-
-  public HiveObjectRef buildPartitionColumnReference(Table table, String columnName,
-                                                     List<String> partValues) {
-    return new HiveObjectRef(HiveObjectType.COLUMN, table.getDbName(), table.getTableName(),
-        partValues, columnName);
-  }
-}
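
A usage sketch for the reference builder; the table is assumed to exist and "id" is a made-up column name:

import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.HiveObjectRefBuilder;

public class ObjectRefSketch {
  static HiveObjectRef tableRef(Table table) {
    // each build*Reference method takes the referenced object directly
    return new HiveObjectRefBuilder().buildTableReference(table);
  }

  static HiveObjectRef columnRef(Table table) {
    return new HiveObjectRefBuilder().buildColumnReference(table, "id");
  }
}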