Posted to commits@hbase.apache.org by en...@apache.org on 2016/08/01 18:11:01 UTC

[4/5] hbase git commit: HBASE-16288 HFile intermediate block level indexes might recurse forever creating multi TB files

HBASE-16288 HFile intermediate block level indexes might recurse forever creating multi TB files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/19831262
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/19831262
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/19831262

Branch: refs/heads/branch-1
Commit: 198312621624b2f5df33582964416594c6b154a3
Parents: 1b303ad
Author: Enis Soztutar <en...@apache.org>
Authored: Mon Aug 1 10:57:09 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Mon Aug 1 11:06:27 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  | 43 +++++++++++++++++---
 .../hadoop/hbase/io/hfile/HFileWriterV2.java    |  2 +
 .../hbase/io/hfile/TestHFileBlockIndex.java     | 43 ++++++++++++++++++++
 3 files changed, 83 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
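
For context, the patch pairs the existing hfile.index.block.max.size guideline with a new
lower bound, hfile.index.block.min.entries (default 16). Below is a minimal sketch, assuming
only the configuration keys visible in the diff, of how both knobs could be set before
writing HFiles; the class name and the chosen values are illustrative, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HFileIndexTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size guideline: try to close an index block once it grows past this many bytes.
        conf.setInt("hfile.index.block.max.size", 128 * 1024);
        // New in HBASE-16288: keep adding entries to the current index block until it has
        // at least this many, even if it is already over the size guideline. Must be >= 2.
        conf.setInt("hfile.index.block.min.entries", 16);
        // A writer created with this Configuration picks both values up (see the
        // HFileWriterV2 hunk below).
      }
    }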


http://git-wip-us.apache.org/repos/asf/hbase/blob/19831262/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index e44b4c9..0962940 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -74,6 +74,16 @@ public class HFileBlockIndex {
   public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";
 
   /**
+   * Minimum number of entries in a single index block. Even if we are above the
+   * hfile.index.block.max.size guideline, we keep writing to the same block until it has at
+   * least this many entries. Having a few entries per block keeps the number of levels in the
+   * multi-level index small, and the value must be at least 2 to prevent infinite recursion.
+   */
+  public static final String MIN_INDEX_NUM_ENTRIES_KEY = "hfile.index.block.min.entries";
+
+  static final int DEFAULT_MIN_INDEX_NUM_ENTRIES = 16;
+
+  /**
    * The number of bytes stored in each "secondary index" entry in addition to
    * key bytes in the non-root index block format. The first long is the file
    * offset of the deeper-level block the entry points to, and the int that
@@ -195,7 +205,7 @@ public class HFileBlockIndex {
      * Return the BlockWithScanInfo which contains the DataBlock with other scan
      * info such as nextIndexedKey. This function will only be called when the
      * HFile version is larger than 1.
-     * 
+     *
      * @param key
      *          the key we are looking for
      * @param currentBlock
@@ -496,7 +506,7 @@ public class HFileBlockIndex {
      * Performs a binary search over a non-root level index block. Utilizes the
      * secondary index, which records the offsets of (offset, onDiskSize,
      * firstKey) tuples of all entries.
-     * 
+     *
      * @param key
      *          the key we are searching for offsets to individual entries in
      *          the blockIndex buffer
@@ -795,6 +805,9 @@ public class HFileBlockIndex {
     /** The maximum size guideline of all multi-level index blocks. */
     private int maxChunkSize;
 
+    /** The minimum number of entries in a single index block. */
+    private int minIndexNumEntries;
+
     /** Whether we require this block index to always be single-level. */
     private boolean singleLevelOnly;
 
@@ -827,15 +840,23 @@ public class HFileBlockIndex {
       this.cacheConf = cacheConf;
       this.nameForCaching = nameForCaching;
       this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE;
+      this.minIndexNumEntries = HFileBlockIndex.DEFAULT_MIN_INDEX_NUM_ENTRIES;
     }
 
     public void setMaxChunkSize(int maxChunkSize) {
       if (maxChunkSize <= 0) {
-        throw new IllegalArgumentException("Invald maximum index block size");
+        throw new IllegalArgumentException("Invalid maximum index block size");
       }
       this.maxChunkSize = maxChunkSize;
     }
 
+    public void setMinIndexNumEntries(int minIndexNumEntries) {
+      if (minIndexNumEntries <= 1) {
+        throw new IllegalArgumentException("Invalid minimum index number of entries, should be >= 2");
+      }
+      this.minIndexNumEntries = minIndexNumEntries;
+    }
+
     /**
      * Writes the root level and intermediate levels of the block index into
      * the output stream, generating the tree from bottom up. Assumes that the
@@ -867,7 +888,11 @@ public class HFileBlockIndex {
           : null;
 
       if (curInlineChunk != null) {
-        while (rootChunk.getRootSize() > maxChunkSize) {
+        while (rootChunk.getRootSize() > maxChunkSize
+            // HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely
+            && rootChunk.getNumEntries() > minIndexNumEntries
+            // Sanity check. We will not hit this in practice, since (minIndexNumEntries ^ 16) blocks can be addressed
+            && numLevels < 16) {
           rootChunk = writeIntermediateLevel(out, rootChunk);
           numLevels += 1;
         }
@@ -963,8 +988,12 @@ public class HFileBlockIndex {
         curChunk.add(currentLevel.getBlockKey(i),
             currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i));
 
-        if (curChunk.getRootSize() >= maxChunkSize)
+        // HBASE-16288: we need at least minIndexNumEntries (default 16) items in each index block so
+        // that we don't end up with too many levels for an index with very large rowKeys. Without
+        // this check, a first key larger than maxChunkSize would cause infinite recursion.
+        if (i >= minIndexNumEntries && curChunk.getRootSize() >= maxChunkSize) {
           writeIntermediateBlock(out, parent, curChunk);
+        }
       }
 
       if (curChunk.getNumEntries() > 0) {
@@ -1457,4 +1486,8 @@ public class HFileBlockIndex {
   public static int getMaxChunkSize(Configuration conf) {
     return conf.getInt(MAX_CHUNK_SIZE_KEY, DEFAULT_MAX_CHUNK_SIZE);
   }
+
+  public static int getMinIndexNumEntries(Configuration conf) {
+    return conf.getInt(MIN_INDEX_NUM_ENTRIES_KEY, DEFAULT_MIN_INDEX_NUM_ENTRIES);
+  }
 }
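
The while-condition and the minIndexNumEntries check above are what bound the index depth:
since every intermediate block now carries at least minIndexNumEntries entries, each level
shrinks the entry count by at least that factor, so the number of levels grows only
logarithmically, and the extra numLevels < 16 guard can never bind in practice
(minIndexNumEntries ^ 16 blocks is more than any HFile can contain). A standalone sketch of
that depth bound, illustrative only and not HBase code:

    public class IndexDepthSketch {
      // Models the entry-count bound only; the real writer also tracks block sizes.
      static int levels(long numDataBlocks, int minIndexNumEntries) {
        int numLevels = 1;
        long entries = numDataBlocks;
        while (entries > minIndexNumEntries && numLevels < 16) { // mirrors the sanity check
          // Each intermediate block holds at least minIndexNumEntries entries,
          // no matter how large the individual keys are.
          entries = (entries + minIndexNumEntries - 1) / minIndexNumEntries;
          numLevels++;
        }
        return numLevels;
      }

      public static void main(String[] args) {
        System.out.println(levels(100, 16));       // 2: leaf level plus root, as in the test below
        System.out.println(levels(1_000_000, 16)); // 5
      }
    }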

http://git-wip-us.apache.org/repos/asf/hbase/blob/19831262/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 054a769..9e68dc3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -132,6 +132,8 @@ public class HFileWriterV2 extends AbstractHFileWriter {
         cacheIndexesOnWrite ? name : null);
     dataBlockIndexWriter.setMaxChunkSize(
         HFileBlockIndex.getMaxChunkSize(conf));
+    dataBlockIndexWriter.setMinIndexNumEntries(
+        HFileBlockIndex.getMinIndexNumEntries(conf));
     inlineBlockWriters.add(dataBlockIndexWriter);
 
     // Meta data block index writer
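
The writer-side change is plumbing only: HFileWriterV2 now reads both values from the
Configuration when the data block index writer is constructed. A minimal sketch of the same
lookup via the static helpers added in this patch (illustrative class name; the printed
values fall back to the compiled-in defaults unless overridden):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;

    public class IndexConfigLookup {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Resolves hfile.index.block.max.size or the default maximum chunk size.
        System.out.println(HFileBlockIndex.getMaxChunkSize(conf));
        // Resolves hfile.index.block.min.entries or the new default of 16.
        System.out.println(HFileBlockIndex.getMinIndexNumEntries(conf));
      }
    }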

http://git-wip-us.apache.org/repos/asf/hbase/blob/19831262/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 772ddc5..b3e0ade 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -659,6 +659,49 @@ public class TestHFileBlockIndex {
         valueRead);
   }
 
+  @Test(timeout=10000)
+  public void testIntermediateLevelIndicesWithLargeKeys() throws IOException {
+    testIntermediateLevelIndicesWithLargeKeys(16);
+  }
+
+  @Test(timeout=10000)
+  public void testIntermediateLevelIndicesWithLargeKeysWithMinNumEntries() throws IOException {
+    // because of the large rowKeys, we would end up with a 50-level block index without the sanity check
+    testIntermediateLevelIndicesWithLargeKeys(2);
+  }
+
+  public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws IOException {
+    Path hfPath = new Path(TEST_UTIL.getDataTestDir(),
+      "testIntermediateLevelIndicesWithLargeKeys.hfile");
+    int maxChunkSize = 1024;
+    FileSystem fs = FileSystem.get(conf);
+    CacheConfig cacheConf = new CacheConfig(conf);
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+    conf.setInt(HFileBlockIndex.MIN_INDEX_NUM_ENTRIES_KEY, minNumEntries);
+    HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
+    HFileWriterV2 hfw =
+        (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
+        .withFileContext(context)
+        .withPath(fs, hfPath).create();
+    List<byte[]> keys = new ArrayList<byte[]>();
+
+    // This should result in leaf-level indices and a root level index
+    for (int i=0; i < 100; i++) {
+      byte[] rowkey = new byte[maxChunkSize + 1];
+      byte[] b = Bytes.toBytes(i);
+      System.arraycopy(b, 0, rowkey, rowkey.length - b.length, b.length);
+      keys.add(rowkey);
+      hfw.append(CellUtil.createCell(rowkey));
+    }
+    hfw.close();
 
+    HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, conf);
+    // Scanner doesn't do Cells yet.  Fix.
+    HFileScanner scanner = reader.getScanner(true, true);
+    for (int i = 0; i < keys.size(); ++i) {
+      scanner.seekTo(CellUtil.createCell(keys.get(i)));
+    }
+    reader.close();
+  }
 }
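
A note on the test above: every rowkey is maxChunkSize + 1 bytes, so a single index entry
already exceeds hfile.index.block.max.size. Before this patch the size-only checks could
therefore never be satisfied, each intermediate level held one entry per block, the root
chunk never shrank, and writeIndexBlocks kept adding levels indefinitely (the multi-TB files
in the issue title). With the minimum-entry checks, even the minNumEntries = 2 variant
converges after a handful of levels, which is why a 10-second timeout is enough to catch a
regression.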