Posted to commits@hbase.apache.org by st...@apache.org on 2012/09/29 05:52:49 UTC
svn commit: r1391741 - in /hbase/trunk/hbase-server/src:
main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
Author: stack
Date: Sat Sep 29 03:52:49 2012
New Revision: 1391741
URL: http://svn.apache.org/viewvc?rev=1391741&view=rev
Log:
HBASE-6871 HFileBlockIndex Write Error in HFile V2 due to incorrect split into intermediate index blocks
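The failure mode: when the file's only inline (leaf-level) index chunk crosses the
configured maximum chunk size right at close time, shouldWriteBlock(true) promotes it
to the root chunk, but writeIndexBlocks() then saw an oversized root chunk and split
it into intermediate-level index blocks that should really have been leaf-level
blocks. The fix nulls out curInlineChunk after the promotion and only performs the
split for a genuine multi-level index, i.e. while curInlineChunk is still non-null.

A minimal sketch of the corrected close-time decision, using simplified stand-in
fields rather than the real BlockIndexChunk/writeIntermediateLevel machinery:

    // Illustrative only -- not the actual HFileBlockIndex code.
    final class BlockIndexCloseSketch {
      static final int MAX_CHUNK_SIZE = 1024;
      Object curInlineChunk = new Object(); // non-null while entries may still arrive
      int rootChunkSize;

      // closing=true with a single index level: promote the lone inline chunk.
      void promoteOnlyInlineChunkToRoot(int promotedSize) {
        rootChunkSize = promotedSize;
        curInlineChunk = null; // disallow further entries; marks the promotion
      }

      void writeIndexBlocks() {
        // HBASE-6871: split an oversized root chunk into intermediate levels only
        // for a genuine multi-level index (curInlineChunk still non-null).
        if (curInlineChunk != null) {
          while (rootChunkSize > MAX_CHUNK_SIZE) {
            rootChunkSize /= 2; // stand-in for writeIntermediateLevel(out, rootChunk)
          }
        }
      }
    }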
Added:
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
Modified:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java?rev=1391741&r1=1391740&r2=1391741&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java Sat Sep 29 03:52:49 2012
@@ -829,7 +829,7 @@ public class HFileBlockIndex {
* @throws IOException
*/
public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
- if (curInlineChunk.getNumEntries() != 0) {
+ if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
throw new IOException("Trying to write a multi-level block index, " +
"but there are " + curInlineChunk.getNumEntries() + " entries in the " +
"last inline chunk.");
@@ -840,9 +840,11 @@ public class HFileBlockIndex {
byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
: null;
- while (rootChunk.getRootSize() > maxChunkSize) {
- rootChunk = writeIntermediateLevel(out, rootChunk);
- numLevels += 1;
+ if (curInlineChunk != null) {
+ while (rootChunk.getRootSize() > maxChunkSize) {
+ rootChunk = writeIntermediateLevel(out, rootChunk);
+ numLevels += 1;
+ }
}
// write the root level
@@ -1004,11 +1006,18 @@ public class HFileBlockIndex {
*/
@Override
public boolean shouldWriteBlock(boolean closing) {
- if (singleLevelOnly)
+ if (singleLevelOnly) {
throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
+ }
- if (curInlineChunk.getNumEntries() == 0)
+ if (curInlineChunk == null) {
+ throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " +
+ "called with closing=true and then called again?");
+ }
+
+ if (curInlineChunk.getNumEntries() == 0) {
return false;
+ }
// We do have some entries in the current inline chunk.
if (closing) {
@@ -1018,7 +1027,7 @@ public class HFileBlockIndex {
expectNumLevels(1);
rootChunk = curInlineChunk;
- curInlineChunk = new BlockIndexChunk();
+ curInlineChunk = null; // Disallow adding any more index entries.
return false;
}
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java?rev=1391741&r1=1391740&r2=1391741&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java Sat Sep 29 03:52:49 2012
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
+ * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate
+ * a leaf index chunk that only goes over the configured index chunk size after adding the last
+ * key/value. The bug is that when we close the file, we convert that inline (leaf-level) chunk
+ * into a root chunk, but then look at the size of that root chunk, find that it is greater than
+ * the configured chunk size, and split it into a number of intermediate index blocks that should
+ * really be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add
+ * another entry to the root-level block, and that would prevent us from upgrading the leaf-level
+ * chunk to the root chunk, thus not triggering the bug.
+ */
+@Category(SmallTests.class)
+public class TestHFileInlineToRootChunkConversion {
+ private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
+ private final Configuration conf = testUtil.getConfiguration();
+
+ @Test
+ public void testWriteHFile() throws Exception {
+ Path hfPath = new Path(testUtil.getDataTestDir(),
+ TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
+ int maxChunkSize = 1024;
+ FileSystem fs = FileSystem.get(conf);
+ CacheConfig cacheConf = new CacheConfig(conf);
+ conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+ HFileWriterV2 hfw =
+ (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
+ .withBlockSize(16)
+ .withPath(fs, hfPath).create();
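+ // With a 16-byte data block size every key/value below overflows its data
+ // block, so each append contributes its own leaf-level index entry.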
+ List<byte[]> keys = new ArrayList<byte[]>();
+ StringBuilder sb = new StringBuilder();
+
+ for (int i = 0; i < 4; ++i) {
+ sb.append("key" + String.format("%05d", i));
+ sb.append("_");
+ for (int j = 0; j < 100; ++j) {
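+ // '0' + j is int arithmetic, so StringBuilder.append(int) writes the
+ // numeric value (48..147), padding each key to a few hundred characters.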
+ sb.append('0' + j);
+ }
+ String keyStr = sb.toString();
+ sb.setLength(0);
+
+ byte[] k = Bytes.toBytes(keyStr);
+ System.out.println("Key: " + Bytes.toString(k));
+ keys.add(k);
+ byte[] v = Bytes.toBytes("value" + i);
+ hfw.append(k, v);
+ }
+ hfw.close();
+
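+ // Read the file back and seek to every key; against an index miswritten
+ // with the HBASE-6871 bogus intermediate-level blocks, these seeks would fail.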
+ HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf);
+ HFileScanner scanner = reader.getScanner(true, true);
+ for (int i = 0; i < keys.size(); ++i) {
+ scanner.seekTo(keys.get(i));
+ }
+ reader.close();
+ }
+}
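To run just this test from the hbase-server module (assuming a standard Maven
setup), something like:

    mvn test -Dtest=TestHFileInlineToRootChunkConversion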