Posted to commits@hbase.apache.org by op...@apache.org on 2019/06/24 02:31:17 UTC

[hbase] 21/23: HBASE-22531 HFileReaderImpl#shouldUseHeap incorrectly returns true when the BlockCache is disabled (#304)

This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit ccabbdd40aff886899a12744de0f6591dd49c6af
Author: openinx <op...@gmail.com>
AuthorDate: Fri Jun 14 09:41:11 2019 +0800

    HBASE-22531 HFileReaderImpl#shouldUseHeap incorrectly returns true when the BlockCache is disabled (#304)
---
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |  2 +-
 .../hfile/TestHFileScannerImplReferenceCount.java  | 25 ++++++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 0dae13c..9cef9c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1419,7 +1419,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    *      boolean, boolean)
    */
   private boolean shouldUseHeap(BlockType expectedBlockType) {
-    if (cacheConf.getBlockCache() == null) {
+    if (!cacheConf.getBlockCache().isPresent()) {
       return false;
     } else if (!cacheConf.isCombinedBlockCache()) {
       // Block to cache in LruBlockCache must be an heap one. So just allocate block memory from
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index 87dd29e..dd9a1c8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
+import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
@@ -422,4 +423,28 @@ public class TestHFileScannerImplReferenceCount {
     Assert.assertNull(scanner.curBlock);
     Assert.assertTrue(scanner.prevBlocks.isEmpty());
   }
+
+  @Test
+  public void testDisabledBlockCache() throws Exception {
+    writeHFile(conf, fs, hfilePath, Algorithm.NONE, DataBlockEncoding.NONE, CELL_COUNT);
+    // Disable the LruBlockCache by setting hfile.block.cache.size to 0.
+    conf.setFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
+    BlockCache defaultBC = BlockCacheFactory.createBlockCache(conf);
+    Assert.assertNull(defaultBC);
+    CacheConfig cacheConfig = new CacheConfig(conf, null, defaultBC, allocator);
+    Assert.assertFalse(cacheConfig.isCombinedBlockCache()); // Must be LruBlockCache.
+    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConfig, true, conf);
+    Assert.assertTrue(reader instanceof HFileReaderImpl);
+    // We've built an HFile whose data block index has 16 levels.
+    Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());
+
+    HFileBlock block1 = reader.getDataBlockIndexReader()
+        .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE)
+        .getHFileBlock();
+
+    Assert.assertTrue(block1.isSharedMem());
+    Assert.assertTrue(block1 instanceof SharedMemHFileBlock);
+    Assert.assertEquals(1, block1.refCnt());
+    Assert.assertTrue(block1.release());
+  }
 }
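
For context on the one-line fix above: CacheConfig#getBlockCache() returns an Optional<BlockCache> rather than a nullable reference, so the old "== null" guard could never fire. With the block cache disabled, shouldUseHeap therefore fell through to the LruBlockCache branch and returned true, forcing blocks onto the heap even though nothing would cache them; the new test asserts the corrected behavior by checking that the loaded block is a SharedMemHFileBlock rather than an on-heap copy. Below is a minimal, self-contained sketch of the Optional pitfall and the fix; ShouldUseHeapDemo and its helper are illustrative stand-ins, not the actual HBase classes.

    import java.util.Optional;

    // Simplified sketch: only the Optional/isPresent pattern mirrors the real
    // code; the class and method names here are illustrative, not HBase APIs.
    public class ShouldUseHeapDemo {

      // Stand-in for CacheConfig#getBlockCache(): it returns an Optional, so
      // the result is never null even when no block cache is configured.
      static Optional<Object> getBlockCache(boolean cacheEnabled) {
        if (cacheEnabled) {
          return Optional.of(new Object());
        }
        return Optional.empty();
      }

      // Buggy shape: comparing an Optional to null is never true, so a
      // disabled cache still falls through and reports "use heap".
      static boolean shouldUseHeapBuggy(boolean cacheEnabled) {
        if (getBlockCache(cacheEnabled) == null) { // never true
          return false;
        }
        return true; // stand-in for the "cache on heap" branch
      }

      // Fixed shape: test isPresent() instead, so a disabled cache returns
      // false and blocks can be served from the pooled allocator.
      static boolean shouldUseHeapFixed(boolean cacheEnabled) {
        if (!getBlockCache(cacheEnabled).isPresent()) {
          return false;
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(shouldUseHeapBuggy(false)); // true  -> the incorrect result
        System.out.println(shouldUseHeapFixed(false)); // false -> the intended result
      }
    }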