Posted to commits@hive.apache.org by da...@apache.org on 2019/04/19 17:25:00 UTC

[hive] branch master updated: HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav Gumashta, reviewed by Daniel Dai)

This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new b58d50c  HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav Gumashta, reviewed by Daniel Dai)
b58d50c is described below

commit b58d50cb73a1f79a5d079e0a2c5ac33d2efc33a0
Author: Daniel Dai <da...@gmail.com>
AuthorDate: Fri Apr 19 10:23:01 2019 -0700

    HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav Gumashta, reviewed by Daniel Dai)
---
 .../hadoop/hive/metastore/cache/CacheUtils.java    |  4 +++
 .../hadoop/hive/metastore/cache/CachedStore.java   |  4 ++-
 .../hadoop/hive/metastore/cache/SharedCache.java   | 29 ++++++++++++++--------
 3 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
index 944c813..d50fa13 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
@@ -40,6 +40,10 @@ public class CacheUtils {
     return buildKey(catName.toLowerCase(), dbName.toLowerCase());
   }
 
+  public static String buildDbKeyWithDelimiterSuffix(String catName, String dbName) {
+    return buildKey(catName.toLowerCase(), dbName.toLowerCase()) + delimit;
+  }
+
   /**
    * Builds a key for the partition cache which is concatenation of partition values, each value
    * separated by a delimiter
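
Why the new helper appends the delimiter: table-cache keys are built as
catalog + delimiter + database + delimiter + table, so a bare database
prefix like "hive<delim>sales" also matches tables in a database named
"sales_eu". Suffixing the delimiter closes that hole. A minimal,
self-contained sketch -- the delimiter value is an assumption for
illustration; only the constant name "delimit" comes from CacheUtils:

    public class DbKeySketch {
      private static final String DELIMIT = "\u0001"; // assumed value, stand-in for CacheUtils.delimit

      static String buildKey(String... elements) {
        return String.join(DELIMIT, elements);
      }

      static String buildDbKeyWithDelimiterSuffix(String catName, String dbName) {
        return buildKey(catName.toLowerCase(), dbName.toLowerCase()) + DELIMIT;
      }

      public static void main(String[] args) {
        String inSales = buildKey("hive", "sales", "orders");      // table in db "sales"
        String inSalesEu = buildKey("hive", "sales_eu", "orders"); // table in db "sales_eu"
        String bare = buildKey("hive", "sales");
        String suffixed = buildDbKeyWithDelimiterSuffix("hive", "sales");
        System.out.println(inSalesEu.startsWith(bare));     // true  -- the collision
        System.out.println(inSales.startsWith(suffixed));   // true  -- intended match
        System.out.println(inSalesEu.startsWith(suffixed)); // false -- collision closed
      }
    }
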
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index e366ebd..6ef9a19 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -724,6 +724,7 @@ public class CachedStore implements RawStore, Configurable {
       } else {
         try {
           triggerPreWarm(rawStore);
+          shouldRunPrewarm = false;
         } catch (Exception e) {
           LOG.error("Prewarm failure", e);
           return;
@@ -815,7 +816,6 @@ public class CachedStore implements RawStore, Configurable {
         if (table != null && !table.isSetPartitionKeys()) {
           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
           Deadline.startTimer("getTableColumnStatistics");
-
           ColumnStatistics tableColStats =
               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
           Deadline.stopTimer();
@@ -865,7 +865,9 @@ public class CachedStore implements RawStore, Configurable {
                   rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
           Deadline.stopTimer();
           sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+          Deadline.startTimer("getPartitionsByNames");
           List<Partition> parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+          Deadline.stopTimer();
           // Also save partitions for consistency as they have the stats state.
           for (Partition part : parts) {
             sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part);
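
Two distinct fixes land in CachedStore.java above: (1) shouldRunPrewarm is
cleared after a successful prewarm, so later runs of the background thread
take the incremental-refresh path instead of re-prewarming every cycle;
(2) the rawStore.getPartitionsByNames() call gets its own
Deadline.startTimer(...)/Deadline.stopTimer() bracket, matching the pattern
the surrounding column-statistics calls already follow. A toy model of the
loop control -- the class and method bodies are hypothetical; only the flag
and the triggerPreWarm name come from the diff:

    class RefreshLoopSketch implements Runnable {
      // volatile: written by the refresh thread, re-armed elsewhere on invalidation
      private volatile boolean shouldRunPrewarm = true;

      @Override
      public void run() {
        if (shouldRunPrewarm) {
          try {
            triggerPreWarm();
            shouldRunPrewarm = false; // the fix: remember that prewarm succeeded
          } catch (Exception e) {
            return; // keep the flag set so the next scheduled run retries prewarm
          }
        } else {
          refreshCaches(); // tables, table/partition column stats, partitions
        }
      }

      private void triggerPreWarm() throws Exception { /* bulk-load metadata from the RawStore */ }
      private void refreshCaches() { /* per-table incremental refresh */ }
    }
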
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 1c23022..60862d4 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -1317,11 +1317,13 @@ public class SharedCache {
         //in case of retry, ignore second try.
         return;
       }
-      byte[] sdHash = tblWrapper.getSdHash();
-      if (sdHash != null) {
-        decrSd(sdHash);
+      if (tblWrapper != null) {
+        byte[] sdHash = tblWrapper.getSdHash();
+        if (sdHash != null) {
+          decrSd(sdHash);
+        }
+        isTableCacheDirty.set(true);
       }
-      isTableCacheDirty.set(true);
     } finally {
       cacheLock.writeLock().unlock();
     }
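
The hunk above hardens the removal path in SharedCache: when the table entry
is already gone (the retry case the existing comment mentions), tblWrapper
can be null, and the old code dereferenced it unconditionally; the dirty
flag is likewise now raised only when an entry was actually removed. Reduced
to a sketch -- TableWrapper, decrSd and isTableCacheDirty stand in for the
real SharedCache members:

    private void removeTableGuarded(String tblKey) {
      TableWrapper tblWrapper = tableCache.remove(tblKey);
      if (tblWrapper == null) {
        return; // retry of an earlier remove: nothing left to clean up
      }
      byte[] sdHash = tblWrapper.getSdHash();
      if (sdHash != null) {
        decrSd(sdHash); // drop our reference on the shared storage descriptor
      }
      isTableCacheDirty.set(true); // signal the refresh thread only on a real removal
    }
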
@@ -1438,25 +1440,30 @@ public class SharedCache {
 
   public void refreshTablesInCache(String catName, String dbName, List<Table> tables) {
     try {
-      cacheLock.writeLock().lock();
       if (isTableCacheDirty.compareAndSet(true, false)) {
         LOG.debug("Skipping table cache update; the table list we have is dirty.");
         return;
       }
-      Map<String, TableWrapper> newTableCache = new HashMap<>();
+      Map<String, TableWrapper> newCacheForDB = new TreeMap<>();
       for (Table tbl : tables) {
         String tblName = StringUtils.normalizeIdentifier(tbl.getTableName());
-        TableWrapper tblWrapper =
-            tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
+        TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
         if (tblWrapper != null) {
           tblWrapper.updateTableObj(tbl, this);
         } else {
           tblWrapper = createTableWrapper(catName, dbName, tblName, tbl);
         }
-        newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
+        newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
+      }
+      cacheLock.writeLock().lock();
+      Iterator<Entry<String, TableWrapper>> entryIterator = tableCache.entrySet().iterator();
+      while (entryIterator.hasNext()) {
+        String key = entryIterator.next().getKey();
+        if (key.startsWith(CacheUtils.buildDbKeyWithDelimiterSuffix(catName, dbName))) {
+          entryIterator.remove();
+        }
       }
-      tableCache.clear();
-      tableCache = newTableCache;
+      tableCache.putAll(newCacheForDB);
     } finally {
       cacheLock.writeLock().unlock();
     }
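
The refreshTablesInCache() rewrite fixes a scope bug: the refresh runs one
database at a time, but the old code called tableCache.clear() and swapped
in a map built only from that database's tables, silently evicting every
other database's entries. The new code builds the replacement map before
taking the write lock, then evicts only the keys carrying this database's
delimiter-suffixed prefix and putAll()s the fresh wrappers. A self-contained
sketch of the swap, plain java.util types with a hypothetical class around
them:

    import java.util.Iterator;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class PrefixSwapSketch {
      private final Map<String, Object> tableCache = new TreeMap<>();
      private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock();

      void replaceDbEntries(String dbKeyPrefix, Map<String, Object> newCacheForDB) {
        cacheLock.writeLock().lock();
        try {
          // Remove only this database's entries; other databases' tables survive.
          Iterator<Map.Entry<String, Object>> it = tableCache.entrySet().iterator();
          while (it.hasNext()) {
            if (it.next().getKey().startsWith(dbKeyPrefix)) {
              it.remove();
            }
          }
          tableCache.putAll(newCacheForDB);
        } finally {
          cacheLock.writeLock().unlock();
        }
      }
    }

One wrinkle in the committed hunk: the lock is now acquired mid-try, after
the new map is built, while the finally block still unlocks unconditionally.
On the early-return path (dirty flag was set) unlock() would run without the
lock being held, which ReentrantReadWriteLock rejects with
IllegalMonitorStateException; the sketch keeps the conventional
lock-then-try shape for that reason.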