Posted to commits@hbase.apache.org by wc...@apache.org on 2023/03/27 10:27:59 UTC

[hbase] branch branch-2 updated: HBASE-27750: Update the list of prefetched Hfiles upon block eviction (#5140)

This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 61e18a3d6c2 HBASE-27750: Update the list of prefetched Hfiles upon block eviction (#5140)
61e18a3d6c2 is described below

commit 61e18a3d6c229c31dfe6f12d485c38fe55f2d804
Author: Kota-SH <sh...@gmail.com>
AuthorDate: Mon Mar 27 04:49:55 2023 -0500

    HBASE-27750: Update the list of prefetched Hfiles upon block eviction (#5140)
    
    Signed-off-by: Wellington Chevreuil <wc...@apache.org>
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  4 ++++
 .../hadoop/hbase/io/hfile/TestPrefetchRSClose.java |  2 +-
 .../io/hfile/bucket/TestBucketCachePersister.java  | 22 +++++++++++++++++++---
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index a9001c447f8..1d96c0bd87e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -368,6 +368,7 @@ public class BucketCache implements BlockCache, HeapSize {
   void startBucketCachePersisterThread() {
     BucketCachePersister cachePersister =
       new BucketCachePersister(this, bucketcachePersistInterval);
+    cachePersister.setDaemon(true);
     cachePersister.start();
   }
 
@@ -599,6 +600,9 @@ public class BucketCache implements BlockCache, HeapSize {
       cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
     }
     if (ioEngine.isPersistent()) {
+      if (prefetchedFileListPath != null) {
+        PrefetchExecutor.removePrefetchedFileWhileEvict(cacheKey.getHfileName());
+      }
       setCacheInconsistent(true);
     }
   }
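
The BucketCache hunk above does two things: the persister thread is marked as a daemon so it cannot keep the JVM alive on shutdown, and block eviction now tells the PrefetchExecutor to forget the evicted block's HFile whenever the IOEngine is persistent and a prefetched-file list path is configured. Below is a rough sketch of the bookkeeping this relies on; only the method names isFilePrefetched and removePrefetchedFileWhileEvict come from this commit, and the map-based structure is illustrative, not the actual HBase implementation.

    // Illustrative sketch only: models the prefetched-file tracking assumed by the
    // diff above. Real PrefetchExecutor internals in HBase may differ.
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class PrefetchTrackingSketch {
      // One entry per HFile whose blocks have been prefetched into the cache.
      private static final Map<String, Long> prefetched = new ConcurrentHashMap<>();

      // Recorded when a prefetch for the given file completes.
      public static void markPrefetched(String hfileName) {
        prefetched.put(hfileName, System.currentTimeMillis());
      }

      // Mirrors PrefetchExecutor.isFilePrefetched(...) used by the new test below.
      public static boolean isFilePrefetched(String hfileName) {
        return prefetched.containsKey(hfileName);
      }

      // Mirrors PrefetchExecutor.removePrefetchedFileWhileEvict(...) called from the
      // eviction path: once a block of the file is evicted, the file must no longer
      // be treated as fully prefetched after a restart.
      public static void removePrefetchedFileWhileEvict(String hfileName) {
        prefetched.remove(hfileName);
      }
    }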
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java
index b0c4cafb2de..ee324e9dbaa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java
@@ -107,7 +107,7 @@ public class TestPrefetchRSClose {
       table.put(put1);
       TEST_UTIL.flush(tableName);
     } finally {
-      Thread.sleep(1500);
+      Thread.sleep(2000);
     }
 
     // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files
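
The TestPrefetchRSClose change simply widens the fixed sleep from 1500ms to 2000ms so the 1000ms persister interval has comfortably elapsed before the assertions run. A hedged alternative, shown only as a sketch, would be to poll for the condition instead of sleeping; TEST_UTIL is the test's existing HBaseTestingUtility, while the 5-second timeout and the persistenceFile variable are assumptions, not taken from the test.

    // Sketch only: wait for the persister output instead of a fixed sleep.
    // "persistenceFile" is a hypothetical java.io.File pointing at the file the
    // test later asserts on (e.g. the bucket cache persistence file).
    TEST_UTIL.waitFor(5000, () -> persistenceFile.exists());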
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java
index 171d62d8b2c..2fbcf2850e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor;
 import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -119,21 +120,36 @@ public class TestBucketCachePersister {
 
   @Test
   public void testPrefetchPersistenceCrashNegative() throws Exception {
-    long bucketCachePersistInterval = 3000;
+    long bucketCachePersistInterval = Long.MAX_VALUE;
     Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval);
     BucketCache bucketCache = setupBucketCache(conf);
     CacheConfig cacheConf = new CacheConfig(conf, bucketCache);
     FileSystem fs = HFileSystem.get(conf);
     // Load Cache
     Path storeFile = writeStoreFile("TestPrefetch2", conf, cacheConf, fs);
-    Path storeFile2 = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
     readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache);
-    readStoreFile(storeFile2, 0, fs, cacheConf, conf, bucketCache);
     assertFalse(new File(testDir + "/prefetch.persistence").exists());
     assertFalse(new File(testDir + "/bucket.persistence").exists());
     cleanupBucketCache(bucketCache);
   }
 
+  @Test
+  public void testPrefetchListUponBlockEviction() throws Exception {
+    Configuration conf = setupBucketCacheConfig(200);
+    BucketCache bucketCache1 = setupBucketCache(conf);
+    CacheConfig cacheConf = new CacheConfig(conf, bucketCache1);
+    FileSystem fs = HFileSystem.get(conf);
+    // Load Blocks in cache
+    Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs);
+    readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache1);
+    Thread.sleep(500);
+    // Evict Blocks from cache
+    BlockCacheKey bucketCacheKey = bucketCache1.backingMap.entrySet().iterator().next().getKey();
+    assertTrue(PrefetchExecutor.isFilePrefetched(storeFile.getName()));
+    bucketCache1.evictBlock(bucketCacheKey);
+    assertFalse(PrefetchExecutor.isFilePrefetched(storeFile.getName()));
+  }
+
   public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheConfig cacheConf,
     Configuration conf, BucketCache bucketCache) throws Exception {
     // Open the file