Posted to commits@hbase.apache.org by st...@apache.org on 2014/06/18 05:33:52 UTC

git commit: HBASE-11364 [BlockCache] Add a flag to cache data blocks in L1 if multi-tier cache

Repository: hbase
Updated Branches:
  refs/heads/master b18c7b122 -> 3ed3c5513


HBASE-11364 [BlockCache] Add a flag to cache data blocks in L1 if multi-tier cache


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ed3c551
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ed3c551
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ed3c551

Branch: refs/heads/master
Commit: 3ed3c5513cc26a2158173caab8d36b6d7f544009
Parents: b18c7b1
Author: Michael Stack <st...@apache.org>
Authored: Tue Jun 17 22:33:40 2014 -0500
Committer: Michael Stack <st...@apache.org>
Committed: Tue Jun 17 22:33:40 2014 -0500

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 77 ++++++++++-------
 .../apache/hadoop/hbase/HTableDescriptor.java   |  6 ++
 .../hadoop/hbase/io/hfile/BlockCache.java       |  5 +-
 .../hadoop/hbase/io/hfile/CacheConfig.java      | 42 +++++++++-
 .../hbase/io/hfile/CombinedBlockCache.java      | 11 +--
 .../hadoop/hbase/io/hfile/DoubleBlockCache.java |  5 +-
 .../hadoop/hbase/io/hfile/HFileReaderV2.java    |  5 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java    | 36 +++++---
 .../hbase/io/hfile/bucket/BucketCache.java      |  6 +-
 .../hbase/io/hfile/slab/SingleSizeCache.java    |  3 +-
 .../hadoop/hbase/io/hfile/slab/SlabCache.java   |  3 +-
 .../security/access/AccessControlLists.java     |  5 +-
 .../hadoop/hbase/io/hfile/TestCacheConfig.java  | 52 +++++++++++-
 .../hbase/io/hfile/TestLruBlockCache.java       | 28 +++----
 .../hbase/io/hfile/bucket/TestBucketCache.java  |  7 +-
 .../regionserver/TestHeapMemoryManager.java     |  3 +-
 src/main/docbkx/book.xml                        | 87 +++++++++++++++-----
 17 files changed, 281 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 0b7c382..a8626ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -89,6 +89,13 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
   public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
   /**
+   * Key for caching data into L1 if the cache is set up with more than one tier.
+   * To set in the shell, do something like this:
+   * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
+   */
+  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
+
+  /**
    * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
    * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
    * family will be loaded into the cache as soon as the file is opened. These
@@ -151,7 +158,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   private volatile Integer blocksize = null;
 
   /**
-   * Default setting for whether to serve from memory or not.
+   * Default setting for whether to try and serve this column family from memory or not.
    */
   public static final boolean DEFAULT_IN_MEMORY = false;
 
@@ -172,6 +179,13 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
 
   /**
+   * Default setting for whether to cache data blocks in the L1 tier.  Only makes sense if more than
+   * one tier is in operation: i.e. if we have an L1 and an L2.  This will be the case if we are
+   * using BucketCache.
+   */
+  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
+
+  /**
    * Default setting for whether to cache index blocks on write if block
    * caching is enabled.
    */
@@ -236,6 +250,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
+      DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
@@ -742,7 +757,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   }
 
   /**
-   * @return True if we are to keep all in use HRegionServer cache.
+   * @return True if we are to favor keeping all values for this column family in the 
+   * HRegionServer cache.
    */
   public boolean isInMemory() {
     String value = getValue(HConstants.IN_MEMORY);
@@ -752,8 +768,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   }
 
   /**
-   * @param inMemory True if we are to keep all values in the HRegionServer
-   * cache
+   * @param inMemory True if we are to favor keeping all values for this column family in the
+   * HRegionServer cache
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setInMemory(boolean inMemory) {
@@ -872,11 +888,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * @return true if we should cache data blocks on write
    */
   public boolean shouldCacheDataOnWrite() {
-    String value = getValue(CACHE_DATA_ON_WRITE);
-    if (value != null) {
-      return Boolean.valueOf(value).booleanValue();
-    }
-    return DEFAULT_CACHE_DATA_ON_WRITE;
+    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
   }
 
   /**
@@ -888,14 +900,33 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   }
 
   /**
+   * @return true if we should cache data blocks in the L1 cache (if block cache deploy
+   * has more than one tier; e.g. we are using CombinedBlockCache).
+   */
+  public boolean shouldCacheDataInL1() {
+    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
+  }
+
+  /**
+   * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
+   * has more than one tier; e.g. we are using CombinedBlockCache).
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setCacheDataInL1(boolean value) {
+    return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
+  }
+
+  private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
+    String value = getValue(key);
+    if (value != null) return Boolean.valueOf(value).booleanValue();
+    return defaultSetting;
+  }
+
+  /**
    * @return true if we should cache index blocks on write
    */
   public boolean shouldCacheIndexesOnWrite() {
-    String value = getValue(CACHE_INDEX_ON_WRITE);
-    if (value != null) {
-      return Boolean.valueOf(value).booleanValue();
-    }
-    return DEFAULT_CACHE_INDEX_ON_WRITE;
+    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
   }
 
   /**
@@ -910,11 +941,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * @return true if we should cache bloomfilter blocks on write
    */
   public boolean shouldCacheBloomsOnWrite() {
-    String value = getValue(CACHE_BLOOMS_ON_WRITE);
-    if (value != null) {
-      return Boolean.valueOf(value).booleanValue();
-    }
-    return DEFAULT_CACHE_BLOOMS_ON_WRITE;
+    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
   }
 
   /**
@@ -930,11 +957,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * close
    */
   public boolean shouldEvictBlocksOnClose() {
-    String value = getValue(EVICT_BLOCKS_ON_CLOSE);
-    if (value != null) {
-      return Boolean.valueOf(value).booleanValue();
-    }
-    return DEFAULT_EVICT_BLOCKS_ON_CLOSE;
+    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
   }
 
   /**
@@ -950,11 +973,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * @return true if we should prefetch blocks into the blockcache on open
    */
   public boolean shouldPrefetchBlocksOnOpen() {
-    String value = getValue(PREFETCH_BLOCKS_ON_OPEN);
-   if (value != null) {
-      return Boolean.valueOf(value).booleanValue();
-    }
-    return DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
+    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
   }
 
   /**

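For illustration only (not part of the patch): the shell example in the CACHE_DATA_IN_L1 javadoc above has a Java-API equivalent using the new HColumnDescriptor#setCacheDataInL1 together with the 0.98-era HBaseAdmin client. The table name 't', family name 'f', and class name below are placeholders; a minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class CacheDataInL1Example {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
        HColumnDescriptor hcd = new HColumnDescriptor("f");
        // Ask that this family's DATA blocks be kept up in the L1 (LruBlockCache) tier
        // when a multi-tier deploy such as CombinedBlockCache (BucketCache) is in use.
        hcd.setCacheDataInL1(true);
        htd.addFamily(hcd);
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          admin.createTable(htd);
        } finally {
          admin.close();
        }
      }
    }
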
http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 59b1394..c1add47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1318,6 +1318,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
               .setBloomFilterType(BloomType.NONE)
+              // Enable cache of data blocks in L1 if more than one caching tier deployed:
+              // e.g. if using CombinedBlockCache (BucketCache).
+              .setCacheDataInL1(true)
       });
 
   static {
@@ -1345,6 +1348,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
               .setInMemory(true)
               .setBlocksize(8 * 1024)
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+              // Enable cache of data blocks in L1 if more than one caching tier deployed:
+              // e.g. if using CombinedBlockCache (BucketCache).
+              .setCacheDataInL1(true)
       });
 
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index f0540e5..b81845c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -33,8 +33,11 @@ public interface BlockCache extends Iterable<CachedBlock> {
    * @param cacheKey The block's cache key.
    * @param buf The block contents wrapped in a ByteBuffer.
    * @param inMemory Whether block should be treated as in-memory
+   * @param cacheDataInL1 In a multi-tier block cache deploy -- i.e. one with an L1 and an L2
+   * tier -- if this flag is true, cache data blocks up in the L1 tier (meta blocks are probably
+   * being cached in L1 already).
    */
-  void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory);
+  void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, boolean cacheDataInL1);
 
   /**
    * Add block to cache (defaults to not in-memory).

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index f017701..223e933 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -203,6 +203,13 @@ public class CacheConfig {
   private final boolean prefetchOnOpen;
 
   /**
+   * If true and more than one tier is in this cache deploy -- e.g. CombinedBlockCache has an L1
+   * and an L2 tier -- then cache data blocks up in the L1 tier (the meta blocks are likely being
+   * cached in L1 already; at least this is the case with CombinedBlockCache).
+   */
+  private boolean cacheDataInL1;
+
+  /**
    * Create a cache configuration using the specified configuration object and
    * family descriptor.
    * @param conf hbase configuration
@@ -224,7 +231,9 @@ public class CacheConfig {
             DEFAULT_EVICT_ON_CLOSE) || family.shouldEvictBlocksOnClose(),
         conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_COMPRESSED_CACHE),
         conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
-            DEFAULT_PREFETCH_ON_OPEN) || family.shouldPrefetchBlocksOnOpen()
+            DEFAULT_PREFETCH_ON_OPEN) || family.shouldPrefetchBlocksOnOpen(),
+        conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
+            HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.shouldCacheDataInL1()
      );
   }
 
@@ -246,7 +255,9 @@ public class CacheConfig {
         conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
         conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY,
             DEFAULT_COMPRESSED_CACHE),
-        conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN)
+        conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
+        conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
+          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1)
      );
   }
 
@@ -263,12 +274,15 @@ public class CacheConfig {
    * @param evictOnClose whether blocks should be evicted when HFile is closed
    * @param cacheCompressed whether to store blocks as compressed in the cache
    * @param prefetchOnOpen whether to prefetch blocks upon open
+   * @param cacheDataInL1 If more than one cache tier is deployed and this is true, cache this
+   * column family's data blocks up in the L1 tier.
    */
   CacheConfig(final BlockCache blockCache,
       final boolean cacheDataOnRead, final boolean inMemory,
       final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
       final boolean cacheBloomsOnWrite, final boolean evictOnClose,
-      final boolean cacheCompressed, final boolean prefetchOnOpen) {
+      final boolean cacheCompressed, final boolean prefetchOnOpen,
+      final boolean cacheDataInL1) {
     this.blockCache = blockCache;
     this.cacheDataOnRead = cacheDataOnRead;
     this.inMemory = inMemory;
@@ -278,6 +292,7 @@ public class CacheConfig {
     this.evictOnClose = evictOnClose;
     this.cacheCompressed = cacheCompressed;
     this.prefetchOnOpen = prefetchOnOpen;
+    this.cacheDataInL1 = cacheDataInL1;
     LOG.info(this);
   }
 
@@ -289,7 +304,8 @@ public class CacheConfig {
     this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory,
         cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
         cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
-        cacheConf.cacheCompressed, cacheConf.prefetchOnOpen);
+        cacheConf.cacheCompressed, cacheConf.prefetchOnOpen,
+        cacheConf.cacheDataInL1);
   }
 
   /**
@@ -340,6 +356,13 @@ public class CacheConfig {
   }
 
   /**
+   * @return True if data blocks are to be cached in the L1 tier (when the block cache deploy has more than one tier).
+   */
+  public boolean isCacheDataInL1() {
+    return isBlockCacheEnabled() && this.cacheDataInL1;
+  }
+
+  /**
    * @return true if data blocks should be written to the cache when an HFile is
    *         written, false if not
    */
@@ -352,11 +375,22 @@ public class CacheConfig {
    * @param cacheDataOnWrite whether data blocks should be written to the cache
    *                         when an HFile is written
    */
+  @VisibleForTesting
   public void setCacheDataOnWrite(boolean cacheDataOnWrite) {
     this.cacheDataOnWrite = cacheDataOnWrite;
   }
 
   /**
+   * Only used for testing.
+   * @param cacheDataInL1 Whether to cache data blocks up in L1 (if a multi-tier cache
+   * implementation).
+   */
+  @VisibleForTesting
+  public void setCacheDataInL1(boolean cacheDataInL1) {
+    this.cacheDataInL1 = cacheDataInL1;
+  }
+
+  /**
    * @return true if index blocks should be written to the cache when an HFile
    *         is written, false if not
    */

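As the constructor wiring above shows, CacheConfig ORs a site-wide reading of the HColumnDescriptor.CACHE_DATA_IN_L1 key out of the Configuration with the per-family setting. A minimal sketch (not part of the patch) of flipping the flag cluster-wide through the Configuration; the class name and the family name 'f' are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class SiteWideCacheDataInL1Sketch {
      public static CacheConfig exampleCacheConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Same effect as setting CACHE_DATA_IN_L1 to true in hbase-site.xml: data blocks
        // for every family are cached in L1, whatever the family descriptor says.
        conf.setBoolean(HColumnDescriptor.CACHE_DATA_IN_L1, true);
        HColumnDescriptor family = new HColumnDescriptor("f");
        CacheConfig cacheConf = new CacheConfig(conf, family);
        assert cacheConf.isCacheDataInL1();
        return cacheConf;
      }
    }
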
http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 7564cc2..84f8ea3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -54,18 +54,19 @@ public class CombinedBlockCache implements BlockCache, HeapSize {
   }
 
   @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+      final boolean cacheDataInL1) {
     boolean isMetaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
-    if (isMetaBlock) {
-      lruCache.cacheBlock(cacheKey, buf, inMemory);
+    if (isMetaBlock || cacheDataInL1) {
+      lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
     } else {
-      bucketCache.cacheBlock(cacheKey, buf, inMemory);
+      bucketCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
     }
   }
 
   @Override
   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-    cacheBlock(cacheKey, buf, false);
+    cacheBlock(cacheKey, buf, false, false);
   }
 
   @Override

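To restate the routing rule the hunk above introduces: meta blocks (INDEX, BLOOM, etc.) always go to the L1 LruBlockCache, while DATA blocks go to the L2 BucketCache unless the new flag asks otherwise. A small illustrative sketch of that predicate; only BlockCategory and the boolean logic come from the hunk, the class and method names are invented:

    import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;

    public class TierSelectionSketch {
      /** @return true if a block of this category should be cached in the L1 (LruBlockCache) tier. */
      static boolean goesToL1(BlockCategory category, boolean cacheDataInL1) {
        boolean isMetaBlock = category != BlockCategory.DATA;
        return isMetaBlock || cacheDataInL1;
      }

      public static void main(String[] args) {
        System.out.println(goesToL1(BlockCategory.INDEX, false)); // true: meta blocks stay in L1
        System.out.println(goesToL1(BlockCategory.DATA, false));  // false: data goes to the L2 BucketCache
        System.out.println(goesToL1(BlockCategory.DATA, true));   // true: CACHE_DATA_IN_L1 pulls data up to L1
      }
    }
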
http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
index c7a1c8c..bf3136b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
@@ -80,8 +80,9 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
-    onHeapCache.cacheBlock(cacheKey, buf, inMemory);
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+      final boolean cacheDataInL1) {
+    onHeapCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
     offHeapCache.cacheBlock(cacheKey, buf);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index 209815a..0bfefaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -354,7 +354,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
       // Cache the block
       if (cacheBlock) {
         cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
-            cacheConf.isInMemory());
+            cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
       }
 
       return metaBlock.getBufferWithoutHeader();
@@ -459,7 +459,8 @@ public class HFileReaderV2 extends AbstractHFileReader {
 
         // Cache the block if necessary
         if (cacheBlock && cacheConf.shouldCacheBlockOnRead(hfileBlock.getBlockType().getCategory())) {
-          cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
+          cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory(),
+            this.cacheConf.isCacheDataInL1());
         }
 
         if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 9c5408f..dd2503c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -57,10 +58,14 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
  *
  * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families.  A block is added with an inMemory
- * flag if necessary, otherwise a block becomes a single access priority.  Once
- * a blocked is accessed again, it changes to multiple access.  This is used
- * to prevent scans from thrashing the cache, adding a least-frequently-used
+ * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
+ * in-memory column family is a column family that should be served from memory if possible):
+ * single-access, multiple-accesses, and in-memory priority.
+ * A block is added with an in-memory priority flag if
+ * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
+ * priority the first time it is read into this block cache.  If a block is accessed again while
+ * in cache, it is marked as a multiple access priority block.  This delineation of blocks is used
+ * to prevent scans from thrashing the cache adding a least-frequently-used
  * element to the eviction algorithm.<p>
  *
  * Each priority is given its own chunk of the total cache to ensure
@@ -74,10 +79,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * pre-allocating data structures and in initial heap estimation of the map.<p>
  *
  * The detailed constructor defines the sizes for the three priorities (they
- * should total to the maximum size defined).  It also sets the levels that
+ * should total to the <code>maximum size</code> defined).  It also sets the levels that
  * trigger and control the eviction thread.<p>
  *
- * The acceptable size is the cache size level which triggers the eviction
+ * The <code>acceptable size</code> is the cache size level which triggers the eviction
  * process to start.  It evicts enough blocks to get the size below the
  * minimum size specified.<p>
  *
@@ -94,14 +99,23 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
 
   static final Log LOG = LogFactory.getLog(LruBlockCache.class);
 
+  /**
+   * Percentage of the total cache size that an eviction run will evict down to; e.g. if set to .8,
+   * we keep evicting during an eviction run until the cache size is down to 80% of the total.
+   */
   static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor";
+
+  /**
+   * Acceptable size of cache (no evictions if size < acceptable)
+   */
   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.acceptable.factor";
+
   static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.single.percentage";
   static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.multi.percentage";
   static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.memory.percentage";
 
   /**
-   * Configuration key to force data-block always(except in-memory are too much)
+   * Configuration key to force data-block always (except in-memory are too much)
    * cached in memory for in-memory hfile, unlike inMemory, which is a column-family
    * configuration, inMemoryForceMode is a cluster-wide configuration
    */
@@ -306,9 +320,11 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
    * @param cacheKey block's cache key
    * @param buf block buffer
    * @param inMemory if block is in-memory
+   * @param cacheDataInL1 Not used by LruBlockCache; it is itself the L1 tier.
    */
   @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+      final boolean cacheDataInL1) {
     LruCachedBlock cb = map.get(cacheKey);
     if(cb != null) {
       // compare the contents, if they are not equal, we are in big trouble
@@ -346,7 +362,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
    * @param buf block buffer
    */
   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-    cacheBlock(cacheKey, buf, false);
+    cacheBlock(cacheKey, buf, false, false);
   }
 
   /**
@@ -920,7 +936,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
     Map<BlockType, Integer> counts =
         new EnumMap<BlockType, Integer>(BlockType.class);
     for (LruCachedBlock cb : map.values()) {
-      BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType();
+      BlockType blockType = ((Cacheable)cb.getBuffer()).getBlockType();
       Integer count = counts.get(blockType);
       counts.put(blockType, (count == null ? 0 : count) + 1);
     }

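The new javadoc above documents the two LruBlockCache watermarks: an eviction run starts once usage passes the acceptable factor and keeps evicting until usage is back under the min factor. A minimal sketch (not from the patch) of tuning them through the Configuration; the 0.85/0.80 values are illustrative, not the shipped defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LruFactorTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Begin an eviction run once the cache grows past 85% of its maximum size...
        conf.setFloat("hbase.lru.blockcache.acceptable.factor", 0.85f);
        // ...and keep evicting until usage is back down to 80% of the maximum.
        conf.setFloat("hbase.lru.blockcache.min.factor", 0.80f);
        return conf;
      }
    }
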
http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 39cbe0b..411cb70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -290,7 +290,7 @@ public class BucketCache implements BlockCache, HeapSize {
    */
   @Override
   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-    cacheBlock(cacheKey, buf, false);
+    cacheBlock(cacheKey, buf, false, false);
   }
 
   /**
@@ -298,9 +298,11 @@ public class BucketCache implements BlockCache, HeapSize {
    * @param cacheKey block's cache key
    * @param cachedItem block buffer
    * @param inMemory if block is in-memory
+   * @param cacheDataInL1 Ignored here; BucketCache serves as the L2 tier.
    */
   @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) {
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
+      final boolean cacheDataInL1) {
     cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
index 383efcc..09b82bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
@@ -294,7 +294,8 @@ public class SingleSizeCache implements BlockCache, HeapSize {
 
   /* Since its offheap, it doesn't matter if its in memory or not */
   @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+      final boolean cacheDataInL1) {
     this.cacheBlock(cacheKey, buf);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
index c4056c4..88c13fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
@@ -241,7 +241,8 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
    * We don't care about whether its in memory or not, so we just pass the call
    * through.
    */
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+      final boolean cacheDataInL1) {
     cacheBlock(cacheKey, buf);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 8baa34b..972f417 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -115,7 +115,10 @@ public class AccessControlLists {
             10, // Ten is arbitrary number.  Keep versions to help debugging.
             Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
             HConstants.FOREVER, BloomType.NONE.toString(),
-            HConstants.REPLICATION_SCOPE_LOCAL));
+            HConstants.REPLICATION_SCOPE_LOCAL).
+            // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will
+            // be the case if we are using CombinedBlockCache (Bucket Cache).
+            setCacheDataInL1(true));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index b9d6e66..0c37b3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -157,7 +158,7 @@ public class TestCacheConfig {
     Cacheable c = new DataCacheEntry();
     // Do asserts on block counting.
     long initialBlockCount = bc.getBlockCount();
-    bc.cacheBlock(bck, c);
+    bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
     assertEquals(doubling? 2: 1, bc.getBlockCount() - initialBlockCount);
     bc.evictBlock(bck);
     assertEquals(initialBlockCount, bc.getBlockCount());
@@ -165,7 +166,7 @@ public class TestCacheConfig {
     // buffers do lazy allocation so sizes are off on first go around.
     if (sizing) {
       long originalSize = bc.getCurrentSize();
-      bc.cacheBlock(bck, c);
+      bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
       long size = bc.getCurrentSize();
       assertTrue(bc.getCurrentSize() > originalSize);
       bc.evictBlock(bck);
@@ -174,6 +175,19 @@ public class TestCacheConfig {
     }
   }
 
+  /**
+   * @param cc the cache config whose block cache we add one data block to
+   * @param filename name used to make the block cache key
+   * @return block count in the cache after the add
+   */
+  private long cacheDataBlock(final CacheConfig cc, final String filename) {
+    BlockCacheKey bck = new BlockCacheKey(filename, 0);
+    Cacheable c = new DataCacheEntry();
+    // Do asserts on block counting.
+    cc.getBlockCache().cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
+    return cc.getBlockCache().getBlockCount();
+  }
+
   @Test
   public void testCacheConfigDefaultLRUBlockCache() {
     CacheConfig cc = new CacheConfig(this.conf);
@@ -202,4 +216,36 @@ public class TestCacheConfig {
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
     // TODO: Assert sizes allocated are right and proportions.
   }
-}
+
+  /**
+   * Test the cacheDataInL1 flag.  When set, data blocks should be cached in the L1 tier, up in
+   * LruBlockCache when using CombinedBlockCache.
+   */
+  @Test
+  public void testCacheDataInL1() {
+    this.conf.set(CacheConfig.BUCKET_CACHE_IOENGINE_KEY, "offheap");
+    this.conf.setInt(CacheConfig.BUCKET_CACHE_SIZE_KEY, 100);
+    this.conf.setFloat(CacheConfig.BUCKET_CACHE_COMBINED_PERCENTAGE_KEY, 0.8f);
+    CacheConfig cc = new CacheConfig(this.conf);
+    assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
+    CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
+    // Add a data block.  Should go into L2, into the Bucket Cache, not the LruBlockCache.
+    cacheDataBlock(cc, "1");
+    LruBlockCache lrubc = (LruBlockCache)cbc.getBlockCaches()[0];
+    assertDataBlockCount(lrubc, 0);
+    // Enable our test flag.
+    cc.setCacheDataInL1(true);
+    cacheDataBlock(cc, "2");
+    assertDataBlockCount(lrubc, 1);
+    cc.setCacheDataInL1(false);
+    cacheDataBlock(cc, "3");
+    assertDataBlockCount(lrubc, 1);
+  }
+
+  private void assertDataBlockCount(final LruBlockCache bc, final int expected) {
+    Map<BlockType, Integer> blocks = bc.getBlockTypeCountsForTest();
+    assertEquals(expected, blocks == null? 0:
+      blocks.get(BlockType.DATA) == null? 0:
+      blocks.get(BlockType.DATA).intValue());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index fd72fb5..fa48676 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -286,7 +286,7 @@ public class TestLruBlockCache {
       cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
 
       // Add memory blocks as such
-      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
+      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false);
       expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize();
 
     }
@@ -321,7 +321,7 @@ public class TestLruBlockCache {
     assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));
 
     // Insert another memory block
-    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
+    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false);
 
     // Three evictions, three evicted.
     assertEquals(3, cache.getStats().getEvictionCount());
@@ -359,7 +359,7 @@ public class TestLruBlockCache {
     assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true));
 
     // Cache a big memory block
-    cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);
+    cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true, false);
 
     // Six evictions, twelve evicted (3 new)
     assertEquals(6, cache.getStats().getEvictionCount());
@@ -412,7 +412,7 @@ public class TestLruBlockCache {
     assertEquals(expectedCacheSize, cache.heapSize());
 
     // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1
-    cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true);
+    cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true, false);
     // Single eviction, one block evicted
     assertEquals(1, cache.getStats().getEvictionCount());
     assertEquals(1, cache.getStats().getEvictedCount());
@@ -420,7 +420,7 @@ public class TestLruBlockCache {
     assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
 
     // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2
-    cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true);
+    cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true, false);
     // Two evictions, two evicted.
     assertEquals(2, cache.getStats().getEvictionCount());
     assertEquals(2, cache.getStats().getEvictedCount());
@@ -428,10 +428,10 @@ public class TestLruBlockCache {
     assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
 
     // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6
-    cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true);
-    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
-    cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true);
-    cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true);
+    cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true, false);
+    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false);
+    cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true, false);
+    cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true, false);
     // Three evictions, three evicted.
     assertEquals(6, cache.getStats().getEvictionCount());
     assertEquals(6, cache.getStats().getEvictedCount());
@@ -443,9 +443,9 @@ public class TestLruBlockCache {
 
     // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted
     // si:mu:me = 0:0:9
-    cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true);
-    cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true);
-    cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true);
+    cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true, false);
+    cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true, false);
+    cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true, false);
     // Three evictions, three evicted.
     assertEquals(9, cache.getStats().getEvictionCount());
     assertEquals(9, cache.getStats().getEvictedCount());
@@ -456,7 +456,7 @@ public class TestLruBlockCache {
 
     // 5. Insert one memory block, the oldest memory evicted
     // si:mu:me = 0:0:9
-    cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true);
+    cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true, false);
     // one eviction, one evicted.
     assertEquals(10, cache.getStats().getEvictionCount());
     assertEquals(10, cache.getStats().getEvictedCount());
@@ -571,7 +571,7 @@ public class TestLruBlockCache {
       cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
 
       // Add memory blocks as such
-      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
+      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false);
     }
 
     // Do not expect any evictions yet

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 403f3d4..c526834 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -94,11 +94,11 @@ public class TestBucketCache {
 
     @Override
     public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf,
-        boolean inMemory) {
+        boolean inMemory, boolean cacheDataInL1) {
       if (super.getBlock(cacheKey, true, false, true) != null) {
         throw new RuntimeException("Cached an already cached block");
       }
-      super.cacheBlock(cacheKey, buf, inMemory);
+      super.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
     }
 
     @Override
@@ -181,5 +181,4 @@ public class TestBucketCache {
     cache.stopWriterThreads();
     CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE);
   }
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index d5b2c95..9f9d9b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -269,7 +269,8 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
+    public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+        boolean cacheDataInL1) {
 
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed3c551/src/main/docbkx/book.xml
----------------------------------------------------------------------
diff --git a/src/main/docbkx/book.xml b/src/main/docbkx/book.xml
index 1af243c..dbf580c 100644
--- a/src/main/docbkx/book.xml
+++ b/src/main/docbkx/book.xml
@@ -1945,14 +1945,15 @@ rs.close();
         <title>Block Cache</title>
 
         <para>HBase provides three different BlockCache implementations: the default onheap
-          LruBlockCache, and BucketCache, and SlabCache, which are both offheap. This section
+          LruBlockCache, plus BucketCache and SlabCache, which are both (usually) offheap. This section
           discusses benefits and drawbacks of each implementation, how to choose the appropriate
           option, and configuration options for each.</para>
         <section>
           <title>Cache Choices</title>
-          <para>LruBlockCache is the original implementation, and is entirely within the Java heap.
-            SlabCache and BucketCache are mainly intended for keeping blockcache data offheap,
-            although BucketCache can also keep data onheap and in files.</para>
+          <para><classname>LruBlockCache</classname> is the original implementation, and is
+              entirely within the Java heap.  <classname>SlabCache</classname> and
+              <classname>BucketCache</classname> are mainly intended for keeping blockcache
+              data offheap, although BucketCache can also keep data onheap and in files.</para>
           <para><emphasis>SlabCache is deprecated and will be removed in 1.0!</emphasis></para>
           <para>BucketCache has seen more production deploys and has more deploy options. Fetching
             will always be slower when fetching from BucketCache or SlabCache, as compared with the
@@ -1961,9 +1962,22 @@ rs.close();
           <para>Anecdotal evidence indicates that BucketCache requires less garbage collection than
             SlabCache so should be even less erratic (than SlabCache or LruBlockCache).</para>
           <para>SlabCache tends to do more garbage collections, because blocks are always moved
-            between L1 and L2, at least given the way DoubleBlockCache currently works. Because the
-            hosting class for each implementation (DoubleBlockCache vs CombinedBlockCache) works so
-            differently, it is difficult to do a fair comparison between BucketCache and SlabCache.
+              between L1 and L2, at least given the way <classname>DoubleBlockCache</classname>
+              currently works. When you enable SlabCache, you are enabling a two tier caching
+              system, an L1 cache which is implemented by an instance of LruBlockCache and
+              an offheap L2 cache which is implemented by SlabCache.  Management of these
+              two tiers and how blocks move between them is done by <classname>DoubleBlockCache</classname>
+              when you are using SlabCache. DoubleBlockCache works by caching all blocks in L1
+              AND L2.  When blocks are evicted from L1, they are moved to L2.  See
+              <xref linkend="offheap.blockcache.slabcache" /> for more detail on how DoubleBlockCache works.
+          </para>
+          <para>The hosting class for BucketCache is <classname>CombinedBlockCache</classname>.
+              It keeps all DATA blocks in the BucketCache and meta blocks -- INDEX and BLOOM blocks --
+              onheap in the L1 <classname>LruBlockCache</classname>.
+          </para>
+          <para>Because the hosting class for each implementation
+              (<classname>DoubleBlockCache</classname> vs <classname>CombinedBlockCache</classname>)
+              works so differently, it is difficult to do a fair comparison between BucketCache and SlabCache.
             See Nick Dimiduk's <link
               xlink:href="http://www.n10k.com/blog/blockcache-101/">BlockCache 101</link> for some
             numbers. See also the description of <link
@@ -1973,7 +1987,15 @@ rs.close();
           <para>For more information about the off heap cache options, see <xref
               linkend="offheap.blockcache" />.</para>
         </section>
-
+        <section xml:id="cache.configurations">
+            <title>General Cache Configurations</title>
+            <para>Apart from the cache implementation itself, you can set some general
+                configuration options to control how the cache performs.
+               See <link
+                xlink:href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html" />.
+              After setting any of these options, restart or rolling restart your cluster for the
+              configuration to take effect. Check logs for errors or unexpected behavior.</para>
+      </section>
         <section
           xml:id="block.cache.design">
           <title>LruBlockCache Design</title>
@@ -1991,11 +2013,15 @@ rs.close();
                 again, it upgrades to this priority. It is thus part of the second group considered
                 during evictions.</para>
             </listitem>
-            <listitem>
+            <listitem xml:id="hbase.cache.inmemory">
               <para>In-memory access priority: If the block's family was configured to be
                 "in-memory", it will be part of this priority disregarding the number of times it
                 was accessed. Catalog tables are configured like this. This group is the last one
                 considered during evictions.</para>
+            <para>To mark a column family as in-memory, call
+                <programlisting>HColumnDescriptor.setInMemory(true);</programlisting> if creating a table from java,
+                or set <command>IN_MEMORY => true</command> when creating or altering a table in
+                the shell: e.g.  <programlisting>hbase(main):003:0> create  't', {NAME => 'f', IN_MEMORY => 'true'}</programlisting></para>
             </listitem>
           </itemizedlist>
           <para> For more information, see the <link
@@ -2082,7 +2108,8 @@ rs.close();
           <para>Currently the recommended way to measure HFile indexes and bloom filters sizes is to
             look at the region server web UI and checkout the relevant metrics. For keys, sampling
             can be done by using the HFile command line tool and look for the average key size
-            metric. </para>
+            metric. Since HBase 0.98.3, you can view detail on BlockCache stats and metrics
+            in a special Block Cache section in the UI.</para>
           <para>It's generally bad to use block caching when the WSS doesn't fit in memory. This is
             the case when you have for example 40GB available across all your region servers' block
             caches but you need to process 1TB of data. One of the reasons is that the churn
@@ -2111,14 +2138,15 @@ rs.close();
         <section
           xml:id="offheap.blockcache">
           <title>Offheap Block Cache</title>
-          <section>
+          <section xml:id="offheap.blockcache.slabcache">
             <title>Enable SlabCache</title>
             <para><emphasis>SlabCache is deprecated and will be removed in 1.0!</emphasis></para>
             <para> SlabCache is originally described in <link
                 xlink:href="http://blog.cloudera.com/blog/2012/01/caching-in-hbase-slabcache/">Caching
                 in Apache HBase: SlabCache</link>. Quoting from the API documentation for <link
                 xlink:href="http://hbase.apache.org/0.94/apidocs/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.html">DoubleBlockCache</link>,
-              it is an abstraction layer that combines two caches, the smaller onHeapCache and the
+              the hosting class for SlabCache deploys:
+              it is an abstraction layer that combines two caches, the smaller onHeapCache and the
               larger offHeapCache. CacheBlock attempts to cache the block in both caches, while
               readblock reads first from the faster on heap cache before looking for the block in
               the off heap cache. Metrics are the combined size and hits and misses of both
@@ -2138,22 +2166,41 @@ rs.close();
             <title>Enable BucketCache</title>
             <para> To enable BucketCache, set the value of
                 <varname>hbase.offheapcache.percentage</varname> to 0 in the RegionServer's
-                <filename>hbase-site.xml</filename> file. This disables SlabCache. Next, set the
-              various options for BucketCache to values appropriate to your situation. You can find
-              more information about all of the (more than 26) options at <link
-                xlink:href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html" />.
-              After setting the options, restart or rolling restart your cluster for the
-              configuration to take effect. Check logs for errors or unexpected behavior.</para>
-            <para>The offheap and onheap caches are managed by <link
+                <filename>hbase-site.xml</filename> file. This disables SlabCache.</para>
+
+                <para>Just as for SlabCache, the usual deploy of BucketCache is via a
+                    managing class that sets up two caching tiers: an L1 onheap cache
+                    implemented by LruBlockCache and a second L2 cache implemented
+                    with BucketCache. The managing class is <link
                 xlink:href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.html">CombinedBlockCache</link>
-              by default. The link describes the mechanism of CombinedBlockCache. To disable
+            by default. That link describes the mechanism of CombinedBlockCache. In short, it works
+            by keeping meta blocks -- INDEX and BLOOM in the L1, onheap LruBlockCache tier -- and DATA
+            blocks are kept in the L2, BucketCache tier. It is possible to amend this behavior in
+            HBase since version 1.0 and ask that a column family have both its meta and DATA blocks hosted onheap in the L1 tier by
+            setting <varname>cacheDataInL1</varname> via <programlisting>HColumnDescriptor.setCacheDataInL1(true)</programlisting>
+            or, in the shell, when creating or altering the column family, by setting <varname>CACHE_DATA_IN_L1</varname>
+            to true: e.g. <programlisting>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</programlisting></para>
+
+        <para>The BucketCache deploy can be
+            onheap, offheap, or file based. You choose which via the
+            <varname>hbase.bucketcache.ioengine</varname> setting: set it to
+            <varname>heap</varname> to run BucketCache as part of the Java heap,
+            <varname>offheap</varname> to have BucketCache make its allocations offheap,
+            or <varname>file:PATH_TO_FILE</varname> to have BucketCache use a file
+            (useful in particular if you have some fast I/O attached to the box, such
+            as SSDs).
+        </para>
+        <para>To disable
               CombinedBlockCache, and use the BucketCache as a strict L2 cache to the L1
               LruBlockCache, set <varname>CacheConfig.BUCKET_CACHE_COMBINED_KEY</varname> to
                 <literal>false</literal>. In this mode, on eviction from L1, blocks go to L2.</para>
+
             <para> By default, <varname>CacheConfig.BUCKET_CACHE_COMBINED_PERCENTAGE_KEY</varname>
               defaults to <literal>0.9</literal>. This means that whatever size you set for the
               bucket cache with <varname>CacheConfig.BUCKET_CACHE_SIZE_KEY</varname>, 90% will be
               used for offheap and 10% will be used by the onheap LruBlockCache. </para>
+          <para>
+          </para>
             <procedure>
               <title>BucketCache Example Configuration</title>
               <para> This sample provides a configuration for a 4 GB offheap BucketCache with a 1 GB