Posted to commits@hbase.apache.org by ap...@apache.org on 2018/05/15 00:17:33 UTC

[2/4] hbase git commit: HBASE-20447 Only fail cacheBlock if block collisions aren't related to next block metadata

HBASE-20447 Only fail cacheBlock if block collisions aren't related to next block metadata

When we pread, we don't force the read to read all of the next block's header.
This opens a race: two opener threads try to cache the same block, one having
read the next block's header and the other not, and the resulting mismatch fails
the open process. This is especially harmful during a split, where it can fail
the whole split. Instead, the caches should fail the addition only if the blocks
differ in their required contents, not merely in the next-block metadata.

Signed-off-by: Andrew Purtell <ap...@apache.org>
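
For readers skimming the diff below: the heart of the fix is the new
BlockCacheUtil#validateBlockAddition helper. A condensed sketch of its logic,
with names as they appear in the patch (surrounding class and imports omitted):

  public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
      BlockCacheKey cacheKey) {
    // First compare the full serialized form, next-block metadata included.
    int comparison = compareCacheBlock(existing, newBlock, true);
    if (comparison != 0) {
      // The copies differ; retry with the next-block metadata excluded.
      int withoutMeta = compareCacheBlock(existing, newBlock, false);
      // If the payloads still differ, the cache really does hold a different
      // block for this key, and that remains a fatal inconsistency.
      Preconditions.checkArgument(withoutMeta == 0,
          "Cached block contents differ, which should not have happened. cacheKey:" + cacheKey);
    }
    // A non-zero result here means the copies differ only in next-block
    // metadata; callers use the sign to decide which copy to keep.
    return comparison;
  }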


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/944a221b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/944a221b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/944a221b

Branch: refs/heads/branch-1
Commit: 944a221b744df7573a2f5f0df5ff78c60fb2f968
Parents: ca544a1
Author: Zach York <zy...@amazon.com>
Authored: Wed Mar 21 15:37:09 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon May 14 16:09:36 2018 -0700

----------------------------------------------------------------------
 .../hbase/io/hfile/MemcachedBlockCache.java     |  2 +-
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java   | 39 ++++++++++++-
 .../apache/hadoop/hbase/io/hfile/Cacheable.java |  3 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java       | 25 +++++----
 .../hadoop/hbase/io/hfile/LruBlockCache.java    | 21 ++++---
 .../hbase/io/hfile/bucket/BucketCache.java      | 29 ++++++----
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 12 +++-
 .../hadoop/hbase/io/hfile/TestCacheConfig.java  |  2 +-
 .../hbase/io/hfile/TestCachedBlockQueue.java    |  2 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java   | 25 ++++++++-
 .../hbase/io/hfile/TestLruBlockCache.java       | 58 +++++++++++++++++++-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 51 +++++++++++++++--
 12 files changed, 223 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index 4357262..f4e9621 100644
--- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -262,7 +262,7 @@ public class MemcachedBlockCache implements BlockCache {
     @Override
     public CachedData encode(HFileBlock block) {
       ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength());
-      block.serialize(bb);
+      block.serialize(bb, true);
       return new CachedData(0, bb.array(), CachedData.MAX_SIZE);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index a3d46ed..0f1ae20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -24,6 +24,10 @@ import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ConcurrentSkipListSet;
 
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
@@ -41,6 +45,7 @@ import org.codehaus.jackson.map.SerializationConfig;
 @InterfaceAudience.Private
 public class BlockCacheUtil {
 
+  private static final Log LOG = LogFactory.getLog(BlockCacheUtil.class);
 
   public static final long NANOS_PER_SECOND = 1000000000;
 
@@ -173,16 +178,44 @@ public class BlockCacheUtil {
     return cbsbf;
   }
 
-  public static int compareCacheBlock(Cacheable left, Cacheable right) {
+  private static int compareCacheBlock(Cacheable left, Cacheable right,
+                                       boolean includeNextBlockOnDiskSize) {
     ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
-    left.serialize(l);
+    left.serialize(l, includeNextBlockOnDiskSize);
     ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
-    right.serialize(r);
+    right.serialize(r, includeNextBlockOnDiskSize);
     return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
              r.array(), r.arrayOffset(), r.limit());
   }
 
   /**
+   * Validate that the existing and newBlock are the same without including the nextBlockMetadata,
+   * if not, throw an exception. If they are the same without the nextBlockMetadata,
+   * return the comparison.
+   *
+   * @param existing block that is existing in the cache.
+   * @param newBlock block that is trying to be cached.
+   * @param cacheKey the cache key of the blocks.
+   * @return comparison of the existing block to the newBlock.
+   */
+  public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
+                                          BlockCacheKey cacheKey) {
+    int comparison = compareCacheBlock(existing, newBlock, true);
+    if (comparison != 0) {
+      LOG.debug("Cached block contents differ, trying to just compare the block contents " +
+          "without the next block. CacheKey: " + cacheKey);
+
+      // compare the contents, if they are not equal, we are in big trouble
+      int comparisonWithoutNextBlockMetadata = compareCacheBlock(existing, newBlock, false);
+
+      Preconditions.checkArgument(comparisonWithoutNextBlockMetadata == 0,
+          "Cached block contents differ, which should not have happened. cacheKey:"
+              + cacheKey);
+    }
+    return comparison;
+  }
+
+  /**
    * Use one of these to keep a running account of cached blocks by file.  Throw it away when done.
    * This is different than metrics in that it is stats on current state of a cache.
    * See getLoadedCachedBlocksByFile
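
A note on the sign convention above: nextBlockOnDiskSize defaults to -1 when the
next header was not read, and Bytes.compareTo orders bytes as unsigned values, so
a missing size (0xffffffff) serializes as lexicographically larger than any real
one. A minimal illustration (using java.nio.ByteBuffer and
org.apache.hadoop.hbase.util.Bytes, and assuming, as in the race this patch
targets, that the two copies differ only in that trailing int):

  // Trailing 4 bytes of the serialized cache metadata for each copy.
  byte[] populated = ByteBuffer.allocate(4).putInt(52).array(); // 00 00 00 34
  byte[] missing   = ByteBuffer.allocate(4).putInt(-1).array(); // ff ff ff ff
  // Unsigned lexicographic comparison: the populated copy sorts first.
  assert Bytes.compareTo(populated, missing) < 0;

Hence validateBlockAddition(existing, newBlock, ...) returns a negative value
exactly when the existing (cached) copy is the one with nextBlockOnDiskSize
populated, which is why the callers below keep the cached block on a negative
result and overwrite it on a positive one.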

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
index f611c61..3a1fa8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
@@ -46,8 +46,9 @@ public interface Cacheable extends HeapSize {
   /**
    * Serializes its data into destination.
    * @param destination Where to serialize to
+   * @param includeNextBlockOnDiskSize Whether to include nextBlockMetadata in the Cache block.
    */
-  void serialize(ByteBuffer destination);
+  void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize);
 
   /**
    * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer.

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index b46a586..d177402 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -91,7 +91,7 @@ import com.google.common.base.Preconditions;
  * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
  * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
  * checksums and then the offset into the file which is needed when we re-make a cache key
- * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
+ * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer, boolean)} and
  * {@link Cacheable#getDeserializer()}.
  *
  * <p>TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where
@@ -320,9 +320,11 @@ public class HFileBlock implements Cacheable {
    * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
    * @param fileContext HFile meta data
    */
-  HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-      long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-      final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
+  @VisibleForTesting
+  public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
+      int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer b, boolean fillHeader,
+      long offset, final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
+      HFileContext fileContext) {
     init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
         prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
     this.buf = b;
@@ -593,6 +595,7 @@ public class HFileBlock implements Cacheable {
       .append(", buf=[").append(buf).append("]")
       .append(", dataBeginsWith=").append(dataBegin)
       .append(", fileContext=").append(fileContext)
+      .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize)
       .append("]");
     return sb.toString();
   }
@@ -1892,12 +1895,10 @@ public class HFileBlock implements Cacheable {
 
   // Cacheable implementation
   @Override
-  public void serialize(ByteBuffer destination) {
-    // BE CAREFUL!! There is a custom version of this serialization over in BucketCache#doDrain.
-    // Make sure any changes in here are reflected over there.
+  public void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize) {
     ByteBufferUtils.copyFromBufferToBuffer(destination, this.buf, 0,
         getSerializedLength() - BLOCK_METADATA_SPACE);
-    destination = addMetaData(destination);
+    destination = addMetaData(destination, includeNextBlockOnDiskSize);
 
     // Make it ready for reading. flip sets position to zero and limit to current position which
     // is what we want if we do not want to serialize the block plus checksums if present plus
@@ -1910,7 +1911,7 @@ public class HFileBlock implements Cacheable {
    */
   public ByteBuffer getMetaData() {
     ByteBuffer bb = ByteBuffer.allocate(BLOCK_METADATA_SPACE);
-    bb = addMetaData(bb);
+    bb = addMetaData(bb, true);
     bb.flip();
     return bb;
   }
@@ -1919,10 +1920,12 @@ public class HFileBlock implements Cacheable {
    * Adds metadata at current position (position is moved forward). Does not flip or reset.
    * @return The passed <code>destination</code> with metadata added.
    */
-  private ByteBuffer addMetaData(final ByteBuffer destination) {
+  private ByteBuffer addMetaData(final ByteBuffer destination, boolean includeNextBlockMetadata) {
     destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
     destination.putLong(this.offset);
-    destination.putInt(this.nextBlockOnDiskSize);
+    if (includeNextBlockMetadata) {
+      destination.putInt(this.nextBlockOnDiskSize);
+    }
     return destination;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 1d17a7d..9cd2cca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -372,15 +372,20 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
 
     LruCachedBlock cb = map.get(cacheKey);
     if (cb != null) {
-      // compare the contents, if they are not equal, we are in big trouble
-      if (BlockCacheUtil.compareCacheBlock(buf, cb.getBuffer()) != 0) {
-        throw new RuntimeException("Cached block contents differ, which should not have happened."
-          + "cacheKey:" + cacheKey);
+      int comparison = BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
+      if (comparison != 0) {
+        if (comparison < 0) {
+          LOG.debug("Cached block contents differ by nextBlockOnDiskSize. Keeping cached block.");
+          return;
+        } else {
+          LOG.debug("Cached block contents differ by nextBlockOnDiskSize. Caching new block.");
+        }
+      } else {
+        String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
+        msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
+        LOG.debug(msg);
+        return;
       }
-      String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-      LOG.warn(msg);
-      return;
     }
     long currentSize = size.get();
     long currentAcceptableSize = acceptableSize();
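
Summarized, the branch added above (BucketCache below follows the same pattern)
reduces to this caller-side decision, condensed from the hunk:

  int comparison = BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
  if (comparison < 0) {
    return;   // cached copy already has nextBlockOnDiskSize; keep it
  } else if (comparison == 0) {
    return;   // identical copies; a harmless re-cache (see HBASE-8547)
  }
  // comparison > 0: the new copy has nextBlockOnDiskSize; fall through and cache it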

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 4ff73b4..dd0a056 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -419,21 +419,30 @@ public class BucketCache implements BlockCache, HeapSize {
       return;
     }
 
-    if (backingMap.containsKey(cacheKey)) {
+    if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) {
       /*
        * Compare already cached block only if lruBlockCache is not used to cache data blocks
        */
       if (!cacheDataInL1) {
         Cacheable existingBlock = getBlock(cacheKey, false, false, false);
-        if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-          throw new RuntimeException("Cached block contents differ, which should not have happened."
-                                    + "cacheKey:" + cacheKey);
+
+        int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+        if (comparison != 0) {
+          if (comparison < 0) {
+            LOG.debug("Cached block contents differ by nextBlockOnDiskSize. " +
+                "Keeping cached block which has nextBlockOnDiskSize populated.");
+            return;
+          } else {
+            LOG.debug("Cached block contents differ by nextBlockOnDiskSize. " +
+                "Caching new block which has nextBlockOnDiskSize populated.");
+          }
+        } else {
+          String msg = "Caching an already cached block: " + cacheKey;
+          msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
+          LOG.debug(msg);
+          return;
         }
       }
-      String msg = "Caching an already cached block: " + cacheKey;
-      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-      LOG.warn(msg);
-      return;
     }
 
     /*
@@ -975,7 +984,7 @@ public class BucketCache implements BlockCache, HeapSize {
   /**
    * Blocks until elements available in <code>q</code> then tries to grab as many as possible
    * before returning.
-   * @param recepticle Where to stash the elements taken from queue. We clear before we use it
+   * @param receptical Where to stash the elements taken from queue. We clear before we use it
    * just in case.
    * @param q The queue to take from.
    * @return <code>receptical laden with elements taken from the queue or empty if none found.
@@ -1426,7 +1435,7 @@ public class BucketCache implements BlockCache, HeapSize {
           ioEngine.write(metadata, offset + len - metadata.limit());
         } else {
           ByteBuffer bb = ByteBuffer.allocate(len);
-          data.serialize(bb);
+          data.serialize(bb, true);
           ioEngine.write(bb, offset);
         }
       } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index e584350..4c282e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -310,7 +310,7 @@ public class CacheTestUtils {
     }
 
     @Override
-    public void serialize(ByteBuffer destination) {
+    public void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize) {
       destination.putInt(buf.length);
       Thread.yield();
       destination.put(buf);
@@ -391,4 +391,14 @@ public class CacheTestUtils {
       return this.block;
     }
   }
+
+  public static void getBlockAndAssertEquals(BlockCache cache, BlockCacheKey key,
+                                             Cacheable blockToCache, ByteBuffer destBuffer,
+                                             ByteBuffer expectedBuffer) {
+    destBuffer.clear();
+    cache.cacheBlock(key, blockToCache);
+    Cacheable actualBlock = cache.getBlock(key, false, false, false);
+    actualBlock.serialize(destBuffer, true);
+    assertEquals(expectedBuffer, destBuffer);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index eea6558..b1cc58c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -126,7 +126,7 @@ public class TestCacheConfig {
     }
 
     @Override
-    public void serialize(ByteBuffer destination) {
+    public void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize) {
       LOG.info("Serialized " + this + " to " + destination);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
index 7cc3378..30c818c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
@@ -126,7 +126,7 @@ public class TestCachedBlockQueue extends TestCase {
             }
 
             @Override
-            public void serialize(ByteBuffer destination) {
+            public void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize) {
             }
 
             @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 12c7857..da54e93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -455,7 +455,7 @@ public class TestHFileBlock {
             // test serialized blocks
             for (boolean reuseBuffer : new boolean[] { false, true }) {
               ByteBuffer serialized = ByteBuffer.allocate(blockFromHFile.getSerializedLength());
-              blockFromHFile.serialize(serialized);
+              blockFromHFile.serialize(serialized, true);
               HFileBlock deserialized =
                 (HFileBlock) blockFromHFile.getDeserializer().deserialize(serialized, reuseBuffer);
               assertEquals(
@@ -845,4 +845,27 @@ public class TestHFileBlock {
           block.heapSize());
     }
   }
+
+  @Test
+  public void testSerializeWithoutNextBlockMetadata() {
+    int size = 100;
+    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
+    byte[] byteArr = new byte[length];
+    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
+    HFileContext meta = new HFileContextBuilder().build();
+    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, 52, -1, meta);
+    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, -1, -1, meta);
+    ByteBuffer buff1 = ByteBuffer.allocate(length);
+    ByteBuffer buff2 = ByteBuffer.allocate(length);
+    blockWithNextBlockMetadata.serialize(buff1, true);
+    blockWithoutNextBlockMetadata.serialize(buff2, true);
+    assertNotEquals(buff1, buff2);
+    buff1.clear();
+    buff2.clear();
+    blockWithNextBlockMetadata.serialize(buff1, false);
+    blockWithoutNextBlockMetadata.serialize(buff2, false);
+    assertEquals(buff1, buff2);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 92408f8..1e458c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -30,6 +30,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -798,6 +799,59 @@ public class TestLruBlockCache {
     assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta);
   }
 
+  @Test
+  public void testCacheBlockNextBlockMetadataMissing() {
+    long maxSize = 100000;
+    long blockSize = calculateBlockSize(maxSize, 10);
+    int size = 100;
+    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
+    byte[] byteArr = new byte[length];
+    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
+    HFileContext meta = new HFileContextBuilder().build();
+    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, 52, -1, meta);
+    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, -1, -1, meta);
+
+    LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
+        (int)Math.ceil(1.2*maxSize/blockSize),
+        LruBlockCache.DEFAULT_LOAD_FACTOR,
+        LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
+        0.66f, // min
+        0.99f, // acceptable
+        0.33f, // single
+        0.33f, // multi
+        0.34f, // memory
+        1.2f,  // limit
+        false,
+        1024);
+
+    BlockCacheKey key = new BlockCacheKey("key1", 0);
+    ByteBuffer actualBuffer = ByteBuffer.allocate(length);
+    ByteBuffer block1Buffer = ByteBuffer.allocate(length);
+    ByteBuffer block2Buffer = ByteBuffer.allocate(length);
+    blockWithNextBlockMetadata.serialize(block1Buffer, true);
+    blockWithoutNextBlockMetadata.serialize(block2Buffer, true);
+
+    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata,
+        actualBuffer, block1Buffer);
+
+    //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata,
+        actualBuffer, block1Buffer);
+
+    //Clear and add blockWithoutNextBlockMetadata
+    cache.clearCache();
+    assertNull(cache.getBlock(key, false, false, false));
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata,
+        actualBuffer, block2Buffer);
+
+    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata,
+        actualBuffer, block1Buffer);
+  }
+
   private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) {
     CachedItem [] blocks = new CachedItem[numBlocks];
     for(int i=0;i<numBlocks;i++) {
@@ -882,9 +936,9 @@ public class TestLruBlockCache {
     }
 
     @Override
-    public void serialize(ByteBuffer destination) {
+    public void serialize(ByteBuffer destination, boolean includeNextBlockOnDiskSize) {
     }
-    
+
     @Override
     public BlockType getBlockType() {
       return BlockType.DATA;

http://git-wip-us.apache.org/repos/asf/hbase/blob/944a221b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 014a796..14fb963 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -37,10 +39,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
 import org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -104,17 +111,11 @@ public class TestBucketCache {
     @Override
     public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
         boolean cacheDataInL1) {
-      if (super.getBlock(cacheKey, true, false, true) != null) {
-        throw new RuntimeException("Cached an already cached block");
-      }
       super.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
     }
 
     @Override
     public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-      if (super.getBlock(cacheKey, true, false, true) != null) {
-        throw new RuntimeException("Cached an already cached block");
-      }
       super.cacheBlock(cacheKey, buf);
     }
   }
@@ -385,4 +386,42 @@ public class TestBucketCache {
     BucketCache.BucketEntry bucketEntry = new BucketCache.BucketEntry(testValue, 10, 10L, true);
     assertEquals(testValue, bucketEntry.offset());
   }
+
+  @Test
+  public void testCacheBlockNextBlockMetadataMissing() {
+    int size = 100;
+    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
+    byte[] byteArr = new byte[length];
+    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
+    HFileContext meta = new HFileContextBuilder().build();
+    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, 52, -1, meta);
+    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
+        HFileBlock.FILL_HEADER, -1, -1, -1, meta);
+
+    BlockCacheKey key = new BlockCacheKey("key1", 0);
+    ByteBuffer actualBuffer = ByteBuffer.allocate(length);
+    ByteBuffer block1Buffer = ByteBuffer.allocate(length);
+    ByteBuffer block2Buffer = ByteBuffer.allocate(length);
+    blockWithNextBlockMetadata.serialize(block1Buffer, true);
+    blockWithoutNextBlockMetadata.serialize(block2Buffer, true);
+
+    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata,
+        actualBuffer, block1Buffer);
+
+    //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata,
+        actualBuffer, block1Buffer);
+
+    //Clear and add blockWithoutNextBlockMetadata
+    cache.evictBlock(key);
+    assertNull(cache.getBlock(key, false, false, false));
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata,
+        actualBuffer, block2Buffer);
+
+    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
+    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata,
+        actualBuffer, block1Buffer);
+  }
 }