Posted to commits@hbase.apache.org by nd...@apache.org on 2014/07/02 20:19:05 UTC

git commit: HBASE-11372 Remove SlabCache

Repository: hbase
Updated Branches:
  refs/heads/master 9ff90931d -> 20cac213a


HBASE-11372 Remove SlabCache


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/20cac213
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/20cac213
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/20cac213

Branch: refs/heads/master
Commit: 20cac213aff85ee1380aef6d42a1042c2bbe5901
Parents: 9ff9093
Author: Nick Dimiduk <nd...@apache.org>
Authored: Mon Jun 30 19:18:46 2014 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Jul 2 10:17:31 2014 -0700

----------------------------------------------------------------------
 .../tmpl/regionserver/BlockCacheTmpl.jamon      |  47 +-
 .../tmpl/regionserver/BlockCacheViewTmpl.jamon  |   2 -
 .../hadoop/hbase/io/hfile/CacheConfig.java      | 108 ++--
 .../hadoop/hbase/io/hfile/DoubleBlockCache.java | 184 -------
 .../hbase/io/hfile/slab/SingleSizeCache.java    | 356 -------------
 .../apache/hadoop/hbase/io/hfile/slab/Slab.java | 137 -----
 .../hadoop/hbase/io/hfile/slab/SlabCache.java   | 514 -------------------
 .../io/hfile/slab/SlabItemActionWatcher.java    |  49 --
 .../hbase/mapreduce/TableMapReduceUtil.java     |   2 -
 .../hbase/io/hfile/TestBlockCacheReporting.java |  17 -
 .../hadoop/hbase/io/hfile/TestCacheConfig.java  |   9 -
 .../io/hfile/slab/TestSingleSizeCache.java      |  83 ---
 .../hadoop/hbase/io/hfile/slab/TestSlab.java    |  77 ---
 .../hbase/io/hfile/slab/TestSlabCache.java      | 111 ----
 14 files changed, 47 insertions(+), 1649 deletions(-)
----------------------------------------------------------------------
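
With SlabCache gone, the hbase.offheapcache.percentage knob no longer does
anything; off-heap block caching is configured through BucketCache instead.
A minimal sketch of the replacement configuration (the key strings correspond
to the BUCKET_CACHE_* constants in CacheConfig.java below, but verify them
against your HBase version):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BucketCacheConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // IOEngine: "offheap" for direct memory, or "file:/some/path" for file-backed.
      conf.set("hbase.bucketcache.ioengine", "offheap");
      // Below 1.0 this is a fraction of max heap; 1.0 or above is megabytes.
      conf.setFloat("hbase.bucketcache.size", 0.4f);
    }
  }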


http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 1aed29a..80391b2 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -43,8 +43,6 @@ org.apache.hadoop.hbase.io.hfile.bucket.BucketCacheStats;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket;
-org.apache.hadoop.hbase.io.hfile.slab.SlabCache;
-org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache;
 org.apache.hadoop.util.StringUtils;
 </%import>
 <div class="tabbable">
@@ -264,17 +262,12 @@ are combined counts. Request count is sum of hits and misses.</p>
     org.apache.hadoop.hbase.io.hfile.BlockCacheUtil.getLoadedCachedBlocksByFile(config, bc);
   AgeSnapshot snapshot = cbsbf.getAgeSnapshot();
 
-  boolean slabCache = bc.getClass().getSimpleName().equals("SlabCache");
-  Map<Integer, SingleSizeCache> sizer = null;
-
   boolean bucketCache = bc.getClass().getSimpleName().equals("BucketCache");
   BucketCacheStats bucketCacheStats = null;
   BucketAllocator bucketAllocator = null;
   Bucket [] buckets = null;
 
-  if (slabCache) {
-    sizer = ((SlabCache)bc).getSizer();
-  } else if (bucketCache) {
+  if (bucketCache) {
     bucketCacheStats = (BucketCacheStats)bc.getStats();
     bucketAllocator = ((BucketCache)bc).getAllocator();
     buckets = bucketAllocator.getBuckets();
@@ -325,8 +318,7 @@ are combined counts. Request count is sum of hits and misses.</p>
         <td>Size of DATA Blocks</td>
     </tr>
 </%if>
-<%doc>Can't do age of block in cache when slab cache</%doc>
-<%if !slabCache %>
+
     <tr>
         <td>Mean</td>
         <td><% String.format("%,d", (long)(snapshot.getMean()/nanosPerSecond)) %></td>
@@ -357,7 +349,7 @@ are combined counts. Request count is sum of hits and misses.</p>
         <td><% String.format("%,d", (long)(snapshot.get99thPercentile()/nanosPerSecond)) %></td>
         <td>99th percentile of age of Blocks in cache (seconds)</td>
     </tr>
-</%if>
+
 <%if bucketCache %>
     <tr>
         <td>Hits per Second</td>
@@ -392,39 +384,6 @@ are combined counts. Request count is sum of hits and misses.</p>
     </tr>
 </%for>
 </table>
-<%elseif slabCache %>
-<p>SlabCache does not keep account of block ages so can not show stats on how long blocks have been cached.</p>
-<h3>SlabCache Slabs</h3>
-<table class="table table-striped">
-    <tr>
-        <th>Block Size</th>
-        <th>Size</th>
-        <th>Free Size</th>
-        <th>Count</th>
-        <th>Evicted</th>
-        <th>Evictions</th>
-        <th>Hits</th>
-        <th>Caching</th>
-        <th>Misses</th>
-        <th>Caching</th>
-        <th>Hit Ratio</th>
-    </tr>
-<%for Map.Entry<Integer, SingleSizeCache> e: sizer.entrySet() %>
-    <tr>
-        <td><% StringUtils.humanReadableInt(e.getKey()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().size()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getFreeSize()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getBlockCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getEvictedCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getEvictionCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getHitCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getHitCachingCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getMissCount()) %></td>
-        <td><% StringUtils.humanReadableInt(e.getValue().getStats().getMissCachingCount()) %></td>
-        <td><% String.format("%,.2f", e.getValue().getStats().getHitRatio() * 100) %><% "%" %></td>
-    </tr>
-</%for>
-</table>
 </%if>
 <%java>
 cbsbf = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
index 50da12a..d6a25ad 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
@@ -36,8 +36,6 @@ org.apache.hadoop.hbase.io.hfile.bucket.BucketCacheStats;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket;
-org.apache.hadoop.hbase.io.hfile.slab.SlabCache;
-org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache;
 org.apache.hadoop.util.StringUtils;
 com.yammer.metrics.stats.Snapshot;
 </%import>

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index cf98102..2e9a41d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -29,8 +29,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
-import org.apache.hadoop.hbase.io.hfile.slab.SlabCache;
-import org.apache.hadoop.hbase.util.DirectMemoryUtils;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -139,19 +137,6 @@ public class CacheConfig {
   public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;
   public static final float DEFAULT_BUCKET_CACHE_COMBINED_PERCENTAGE = 0.9f;
 
-  /**
-   * Setting this float to a non-null value turns on {@link DoubleBlockCache}
-   * which makes use of the {@link LruBlockCache} and {@link SlabCache}.
-   * 
-   * The float value of between 0 and 1 will be multiplied against the setting for
-   * <code>-XX:MaxDirectMemorySize</code> to figure what size of the offheap allocation to give
-   * over to slab cache.
-   * 
-   * Slab cache has been little used and is likely to be deprecated in the near future.
-   */
-  public static final String SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY =
-    "hbase.offheapcache.percentage";
-
  /**
    * Configuration key to prefetch all blocks of a given file into the block cache
    * when the file is opened.
@@ -491,52 +476,31 @@ public class CacheConfig {
     MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
     long lruCacheSize = (long) (mu.getMax() * cachePercentage);
     int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
-    long slabCacheOffHeapCacheSize =
-      conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0) == 0?
-      0:
-      (long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0) *
-          DirectMemoryUtils.getDirectMemorySize());
-    if (slabCacheOffHeapCacheSize <= 0) {
-      String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
-      float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
-      // A percentage of max heap size or a absolute value with unit megabytes
-      long bucketCacheSize = (long) (bucketCachePercentage < 1 ? mu.getMax()
-          * bucketCachePercentage : bucketCachePercentage * 1024 * 1024);
-
-      boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
-          DEFAULT_BUCKET_CACHE_COMBINED);
-      BucketCache bucketCache = null;
-      if (bucketCacheIOEngineName != null && bucketCacheSize > 0) {
-        int writerThreads = conf.getInt(BUCKET_CACHE_WRITER_THREADS_KEY,
-            DEFAULT_BUCKET_CACHE_WRITER_THREADS);
-        int writerQueueLen = conf.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY,
-            DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
-        String persistentPath = conf.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
-        float combinedPercentage = conf.getFloat(
-            BUCKET_CACHE_COMBINED_PERCENTAGE_KEY,
-            DEFAULT_BUCKET_CACHE_COMBINED_PERCENTAGE);
-        String[] configuredBucketSizes = conf.getStrings(BUCKET_CACHE_BUCKETS_KEY);
-        int[] bucketSizes = null;
-        if (configuredBucketSizes != null) {
-          bucketSizes = new int[configuredBucketSizes.length];
-          for (int i = 0; i < configuredBucketSizes.length; i++) {
-            bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i]);
-          }
-        }
-        if (combinedWithLru) {
-          lruCacheSize = (long) ((1 - combinedPercentage) * bucketCacheSize);
-          bucketCacheSize = (long) (combinedPercentage * bucketCacheSize);
-        }
-        try {
-          int ioErrorsTolerationDuration = conf.getInt(
-              "hbase.bucketcache.ioengine.errors.tolerated.duration",
-              BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
-          bucketCache = new BucketCache(bucketCacheIOEngineName,
-              bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath,
-              ioErrorsTolerationDuration);
-        } catch (IOException ioex) {
-          LOG.error("Can't instantiate bucket cache", ioex);
-          throw new RuntimeException(ioex);
+
+    String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
+    float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
+    // A percentage of max heap size or a absolute value with unit megabytes
+    long bucketCacheSize = (long) (bucketCachePercentage < 1 ? mu.getMax()
+      * bucketCachePercentage : bucketCachePercentage * 1024 * 1024);
+
+    boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
+      DEFAULT_BUCKET_CACHE_COMBINED);
+    BucketCache bucketCache = null;
+    if (bucketCacheIOEngineName != null && bucketCacheSize > 0) {
+      int writerThreads = conf.getInt(BUCKET_CACHE_WRITER_THREADS_KEY,
+        DEFAULT_BUCKET_CACHE_WRITER_THREADS);
+      int writerQueueLen = conf.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY,
+        DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
+      String persistentPath = conf.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
+      float combinedPercentage = conf.getFloat(
+        BUCKET_CACHE_COMBINED_PERCENTAGE_KEY,
+        DEFAULT_BUCKET_CACHE_COMBINED_PERCENTAGE);
+      String[] configuredBucketSizes = conf.getStrings(BUCKET_CACHE_BUCKETS_KEY);
+      int[] bucketSizes = null;
+      if (configuredBucketSizes != null) {
+        bucketSizes = new int[configuredBucketSizes.length];
+        for (int i = 0; i < configuredBucketSizes.length; i++) {
+          bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i]);
         }
       }
       LOG.info("Allocating LruBlockCache size=" +
@@ -548,10 +512,26 @@ public class CacheConfig {
       } else {
         GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
       }
+      try {
+        int ioErrorsTolerationDuration = conf.getInt(
+          "hbase.bucketcache.ioengine.errors.tolerated.duration",
+          BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
+        bucketCache = new BucketCache(bucketCacheIOEngineName,
+          bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath,
+          ioErrorsTolerationDuration);
+      } catch (IOException ioex) {
+        LOG.error("Can't instantiate bucket cache", ioex);
+        throw new RuntimeException(ioex);
+      }
+    }
+    LOG.info("Allocating LruBlockCache size=" +
+      StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
+    LruBlockCache lruCache = new LruBlockCache(lruCacheSize, blockSize);
+    lruCache.setVictimCache(bucketCache);
+    if (bucketCache != null && combinedWithLru) {
+      GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache);
     } else {
-      LOG.warn("SlabCache is deprecated. Consider BucketCache as a replacement.");
-      GLOBAL_BLOCK_CACHE_INSTANCE = new DoubleBlockCache(
-          lruCacheSize, slabCacheOffHeapCacheSize, blockSize, blockSize, conf);
+      GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
     }
     return GLOBAL_BLOCK_CACHE_INSTANCE;
   }
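
The rewritten method leaves two shapes: with BUCKET_CACHE_COMBINED_KEY set,
LruBlockCache and BucketCache are wrapped in a CombinedBlockCache; otherwise
the BucketCache becomes the LruBlockCache's victim cache, receiving blocks
that fall out of the on-heap tier. A minimal standalone sketch of the
victim-cache pattern, with illustrative names rather than the HBase classes:

  import java.util.LinkedHashMap;
  import java.util.Map;

  final class VictimAwareLru {
    interface L2 { byte[] get(String key); void offer(String key, byte[] value); }

    private final LinkedHashMap<String, byte[]> l1;
    private final L2 victim; // null when no off-heap cache is configured

    VictimAwareLru(final int capacity, final L2 victim) {
      this.victim = victim;
      // Access-ordered map; removeEldestEntry is the LRU eviction hook.
      this.l1 = new LinkedHashMap<String, byte[]>(16, 0.75f, true) {
        @Override protected boolean removeEldestEntry(Map.Entry<String, byte[]> eldest) {
          boolean evict = size() > capacity;
          if (evict && victim != null) {
            victim.offer(eldest.getKey(), eldest.getValue()); // demote to L2
          }
          return evict;
        }
      };
    }

    synchronized byte[] get(String key) {
      byte[] v = l1.get(key);
      if (v == null && victim != null) {
        v = victim.get(key);
        if (v != null) l1.put(key, v); // promote a hot block back into L1
      }
      return v;
    }

    synchronized void put(String key, byte[] value) { l1.put(key, value); }
  }

Unlike the DoubleBlockCache deleted below, nothing is written to the off-heap
tier up front; it only sees what the on-heap tier evicts.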

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
deleted file mode 100644
index bf3136b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.hfile.slab.SlabCache;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * DoubleBlockCache is an abstraction layer that combines two caches, the
- * smaller onHeapCache and the larger offHeapCache. CacheBlock attempts to cache
- * the block in both caches, while readblock reads first from the faster on heap
- * cache before looking for the block in the off heap cache. Metrics are the
- * combined size and hits and misses of both caches.
- *
- * @deprecated As of 1.0, replaced by {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}.
- */
-@InterfaceAudience.Private
-@Deprecated
-public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
-
-  static final Log LOG = LogFactory.getLog(DoubleBlockCache.class.getName());
-
-  private final LruBlockCache onHeapCache;
-  private final SlabCache offHeapCache;
-  private final CacheStats stats;
-
-  /**
-   * Default constructor. Specify maximum size and expected average block size
-   * (approximation is fine).
-   * <p>
-   * All other factors will be calculated based on defaults specified in this
-   * class.
-   *
-   * @param onHeapSize maximum size of the onHeapCache, in bytes.
-   * @param offHeapSize maximum size of the offHeapCache, in bytes.
-   * @param onHeapBlockSize average block size of the on heap cache.
-   * @param offHeapBlockSize average block size for the off heap cache
-   * @param conf configuration file. currently used only by the off heap cache.
-   */
-  public DoubleBlockCache(long onHeapSize, long offHeapSize,
-      long onHeapBlockSize, long offHeapBlockSize, Configuration conf) {
-
-    LOG.info("Creating on-heap cache of size "
-        + StringUtils.byteDesc(onHeapSize)
-        + " with an average block size of "
-        + StringUtils.byteDesc(onHeapBlockSize));
-    onHeapCache = new LruBlockCache(onHeapSize, onHeapBlockSize, conf);
-
-    LOG.info("Creating off-heap cache of size "
-        + StringUtils.byteDesc(offHeapSize)
-        + "with an average block size of "
-        + StringUtils.byteDesc(offHeapBlockSize));
-    offHeapCache = new SlabCache(offHeapSize, offHeapBlockSize);
-
-    offHeapCache.addSlabByConf(conf);
-    this.stats = new CacheStats();
-  }
-
-  @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
-      final boolean cacheDataInL1) {
-    onHeapCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
-    offHeapCache.cacheBlock(cacheKey, buf);
-  }
-
-  @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-    onHeapCache.cacheBlock(cacheKey, buf);
-    offHeapCache.cacheBlock(cacheKey, buf);
-  }
-
-  @Override
-  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
-      boolean updateCacheMetrics) {
-    Cacheable cachedBlock;
-
-    if ((cachedBlock = onHeapCache.getBlock(cacheKey, caching, repeat,
-        updateCacheMetrics)) != null) {
-      if (updateCacheMetrics) stats.hit(caching);
-      return cachedBlock;
-
-    } else if ((cachedBlock = offHeapCache.getBlock(cacheKey, caching, repeat,
-        updateCacheMetrics)) != null) {
-      if (caching) {
-        onHeapCache.cacheBlock(cacheKey, cachedBlock);
-      }
-      if (updateCacheMetrics) stats.hit(caching);
-      return cachedBlock;
-    }
-
-    if (!repeat && updateCacheMetrics) stats.miss(caching);
-    return null;
-  }
-
-  @Override
-  public boolean evictBlock(BlockCacheKey cacheKey) {
-    stats.evict();
-    boolean cacheA = onHeapCache.evictBlock(cacheKey);
-    boolean cacheB = offHeapCache.evictBlock(cacheKey);
-    boolean evicted = cacheA || cacheB;
-    if (evicted) {
-      stats.evicted();
-    }
-    return evicted;
-  }
-
-  @Override
-  public CacheStats getStats() {
-    return this.stats;
-  }
-
-  @Override
-  public void shutdown() {
-    onHeapCache.shutdown();
-    offHeapCache.shutdown();
-  }
-
-  @Override
-  public long heapSize() {
-    return onHeapCache.heapSize() + offHeapCache.heapSize();
-  }
-
-  public long size() {
-    return onHeapCache.size() + offHeapCache.size();
-  }
-
-  public long getFreeSize() {
-    return onHeapCache.getFreeSize() + offHeapCache.getFreeSize();
-  }
-
-  public long getCurrentSize() {
-    return onHeapCache.getCurrentSize() + offHeapCache.getCurrentSize();
-  }
-
-  @Override
-  public int evictBlocksByHfileName(String hfileName) {
-    onHeapCache.evictBlocksByHfileName(hfileName);
-    offHeapCache.evictBlocksByHfileName(hfileName);
-    return 0;
-  }
-
-  @Override
-  public long getBlockCount() {
-    return onHeapCache.getBlockCount() + offHeapCache.getBlockCount();
-  }
-
-  @Override
-  public void setMaxSize(long size) {
-    this.onHeapCache.setMaxSize(size);
-  }
-
-  @Override
-  public Iterator<CachedBlock> iterator() {
-    return new BlockCachesIterator(getBlockCaches());
-  }
-
-  @Override
-  public BlockCache[] getBlockCaches() {
-    return new BlockCache [] {this.onHeapCache, this.offHeapCache};
-  }
-}
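
Boiled down, the policy this deleted class implemented (the one the
victim-cache wiring above replaces) was to populate both tiers on every
write, read the faster tier first, and re-promote on an off-heap hit. A
sketch under those assumptions, with plain maps standing in for the caches:

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  final class DoubleCache<K, V> {
    private final Map<K, V> onHeap = new ConcurrentHashMap<K, V>();
    private final Map<K, V> offHeap = new ConcurrentHashMap<K, V>();

    void cache(K key, V value) {   // cacheBlock(): populate both tiers up front
      onHeap.put(key, value);
      offHeap.put(key, value);
    }

    V get(K key) {                 // getBlock(): fast tier first, then slow
      V v = onHeap.get(key);
      if (v != null) return v;
      v = offHeap.get(key);
      if (v != null) onHeap.put(key, v); // re-promote on an off-heap hit
      return v;
    }
  }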

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
deleted file mode 100644
index 09b82bf..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
+++ /dev/null
@@ -1,356 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-import org.apache.hadoop.hbase.io.hfile.CacheStats;
-import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-
-/**
- * SingleSizeCache is a slab allocated cache that caches elements up to a single
- * size. It uses a slab allocator (Slab.java) to divide a direct bytebuffer,
- * into evenly sized blocks. Any cached data will take up exactly 1 block. An
- * exception will be thrown if the cached data cannot fit into the blockSize of
- * this SingleSizeCache.
- *
- * Eviction and LRUness is taken care of by Guava's MapMaker, which creates a
- * ConcurrentLinkedHashMap.
- *
- * @deprecated As of 1.0, replaced by {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}.
- **/
-@InterfaceAudience.Private
-@Deprecated
-public class SingleSizeCache implements BlockCache, HeapSize {
-  private final Slab backingStore;
-  private final ConcurrentMap<BlockCacheKey, CacheablePair> backingMap;
-  private final int numBlocks;
-  private final int blockSize;
-  private final CacheStats stats;
-  private final SlabItemActionWatcher actionWatcher;
-  private final AtomicLong size;
-  private final AtomicLong timeSinceLastAccess;
-  public final static long CACHE_FIXED_OVERHEAD = ClassSize
-      .align((2 * Bytes.SIZEOF_INT) + (5 * ClassSize.REFERENCE)
-          + +ClassSize.OBJECT);
-
-  static final Log LOG = LogFactory.getLog(SingleSizeCache.class);
-
-  /**
-   * Default constructor. Specify the size of the blocks, number of blocks, and
-   * the SlabCache this cache will be assigned to.
-   *
-   *
-   * @param blockSize the size of each block, in bytes
-   *
-   * @param numBlocks the number of blocks of blockSize this cache will hold.
-   *
-   * @param master the SlabCache this SingleSlabCache is assigned to.
-   */
-  public SingleSizeCache(int blockSize, int numBlocks,
-      SlabItemActionWatcher master) {
-    this.blockSize = blockSize;
-    this.numBlocks = numBlocks;
-    backingStore = new Slab(blockSize, numBlocks);
-    this.stats = new CacheStats();
-    this.actionWatcher = master;
-    this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
-    this.timeSinceLastAccess = new AtomicLong();
-
-    // This evictionListener is called whenever the cache automatically
-    // evicts something.
-    RemovalListener<BlockCacheKey, CacheablePair> listener =
-      new RemovalListener<BlockCacheKey, CacheablePair>() {
-        @Override
-        public void onRemoval(
-            RemovalNotification<BlockCacheKey, CacheablePair> notification) {
-          if (!notification.wasEvicted()) {
-            // Only process removals by eviction, not by replacement or
-            // explicit removal
-            return;
-          }
-          CacheablePair value = notification.getValue();
-          timeSinceLastAccess.set(System.nanoTime()
-              - value.recentlyAccessed.get());
-          stats.evict();
-          doEviction(notification.getKey(), value);
-        }
-      };
-
-    backingMap = CacheBuilder.newBuilder()
-        .maximumSize(numBlocks - 1)
-        .removalListener(listener)
-        .<BlockCacheKey, CacheablePair>build()
-        .asMap();
-  }
-
-  @Override
-  public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
-    ByteBuffer storedBlock;
-
-    try {
-      storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
-    } catch (InterruptedException e) {
-      LOG.warn("SlabAllocator was interrupted while waiting for block to become available");
-      LOG.warn(e);
-      return;
-    }
-
-    CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(),
-        storedBlock);
-    toBeCached.serialize(storedBlock);
-
-    synchronized (this) {
-      CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);
-
-      if (alreadyCached != null) {
-        backingStore.free(storedBlock);
-        throw new RuntimeException("already cached " + blockName);
-      }
-      if (actionWatcher != null) {
-        actionWatcher.onInsertion(blockName, this);
-      }
-    }
-    newEntry.recentlyAccessed.set(System.nanoTime());
-    this.size.addAndGet(newEntry.heapSize());
-  }
-
-  @Override
-  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
-      boolean updateCacheMetrics) {
-    CacheablePair contentBlock = backingMap.get(key);
-    if (contentBlock == null) {
-      if (!repeat && updateCacheMetrics) stats.miss(caching);
-      return null;
-    }
-
-    if (updateCacheMetrics) stats.hit(caching);
-    // If lock cannot be obtained, that means we're undergoing eviction.
-    try {
-      contentBlock.recentlyAccessed.set(System.nanoTime());
-      synchronized (contentBlock) {
-        if (contentBlock.serializedData == null) {
-          // concurrently evicted
-          LOG.warn("Concurrent eviction of " + key);
-          return null;
-        }
-        return contentBlock.deserializer
-            .deserialize(contentBlock.serializedData.asReadOnlyBuffer());
-      }
-    } catch (Throwable t) {
-      LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
-      return null;
-    }
-  }
-
-  /**
-   * Evicts the block
-   *
-   * @param key the key of the entry we are going to evict
-   * @return the evicted ByteBuffer
-   */
-  public boolean evictBlock(BlockCacheKey key) {
-    stats.evict();
-    CacheablePair evictedBlock = backingMap.remove(key);
-
-    if (evictedBlock != null) {
-      doEviction(key, evictedBlock);
-    }
-    return evictedBlock != null;
-  }
-
-  private void doEviction(BlockCacheKey key, CacheablePair evictedBlock) {
-    long evictedHeap = 0;
-    synchronized (evictedBlock) {
-      if (evictedBlock.serializedData == null) {
-        // someone else already freed
-        return;
-      }
-      evictedHeap = evictedBlock.heapSize();
-      ByteBuffer bb = evictedBlock.serializedData;
-      evictedBlock.serializedData = null;
-      backingStore.free(bb);
-
-      // We have to do this callback inside the synchronization here.
-      // Otherwise we can have the following interleaving:
-      // Thread A calls getBlock():
-      // SlabCache directs call to this SingleSizeCache
-      // It gets the CacheablePair object
-      // Thread B runs eviction
-      // doEviction() is called and sets serializedData = null, here.
-      // Thread A sees the null serializedData, and returns null
-      // Thread A calls cacheBlock on the same block, and gets
-      // "already cached" since the block is still in backingStore
-
-      if (actionWatcher != null) {
-        actionWatcher.onEviction(key, this);
-      }
-    }
-    stats.evicted();
-    size.addAndGet(-1 * evictedHeap);
-  }
-
-  public void logStats() {
-
-    long milliseconds = this.timeSinceLastAccess.get() / 1000000;
-
-    LOG.info("For Slab of size " + this.blockSize + ": "
-        + this.getOccupiedSize() / this.blockSize
-        + " occupied, out of a capacity of " + this.numBlocks
-        + " blocks. HeapSize is "
-        + StringUtils.humanReadableInt(this.heapSize()) + " bytes." + ", "
-        + "churnTime=" + StringUtils.formatTime(milliseconds));
-
-    LOG.info("Slab Stats: " + "accesses="
-        + stats.getRequestCount()
-        + ", "
-        + "hits="
-        + stats.getHitCount()
-        + ", "
-        + "hitRatio="
-        + (stats.getHitCount() == 0 ? "0" : (StringUtils.formatPercent(
-            stats.getHitRatio(), 2) + "%, "))
-        + "cachingAccesses="
-        + stats.getRequestCachingCount()
-        + ", "
-        + "cachingHits="
-        + stats.getHitCachingCount()
-        + ", "
-        + "cachingHitsRatio="
-        + (stats.getHitCachingCount() == 0 ? "0" : (StringUtils.formatPercent(
-            stats.getHitCachingRatio(), 2) + "%, ")) + "evictions="
-        + stats.getEvictionCount() + ", " + "evicted="
-        + stats.getEvictedCount() + ", " + "evictedPerRun="
-        + stats.evictedPerEviction());
-
-  }
-
-  public void shutdown() {
-    backingStore.shutdown();
-  }
-
-  public long heapSize() {
-    return this.size.get() + backingStore.heapSize();
-  }
-
-  public long size() {
-    return (long) this.blockSize * (long) this.numBlocks;
-  }
-
-  public long getFreeSize() {
-    return (long) backingStore.getBlocksRemaining() * (long) blockSize;
-  }
-
-  public long getOccupiedSize() {
-    return (long) (numBlocks - backingStore.getBlocksRemaining()) * (long) blockSize;
-  }
-
-  public long getEvictedCount() {
-    return stats.getEvictedCount();
-  }
-
-  public CacheStats getStats() {
-    return this.stats;
-  }
-
-  @Override
-  public long getBlockCount() {
-    return numBlocks - backingStore.getBlocksRemaining();
-  }
-
-  /* Since its offheap, it doesn't matter if its in memory or not */
-  @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
-      final boolean cacheDataInL1) {
-    this.cacheBlock(cacheKey, buf);
-  }
-
-  /*
-   * This is never called, as evictions are handled in the SlabCache layer,
-   * implemented in the event we want to use this as a standalone cache.
-   */
-  @Override
-  public int evictBlocksByHfileName(String hfileName) {
-    int evictedCount = 0;
-    for (BlockCacheKey e : backingMap.keySet()) {
-      if (e.getHfileName().equals(hfileName)) {
-        this.evictBlock(e);
-      }
-    }
-    return evictedCount;
-  }
-
-  @Override
-  public long getCurrentSize() {
-    return 0;
-  }
-
-  /* Just a pair class, holds a reference to the parent cacheable */
-  private static class CacheablePair implements HeapSize {
-    final CacheableDeserializer<Cacheable> deserializer;
-    ByteBuffer serializedData;
-    AtomicLong recentlyAccessed;
-
-    private CacheablePair(CacheableDeserializer<Cacheable> deserializer,
-        ByteBuffer serializedData) {
-      this.recentlyAccessed = new AtomicLong();
-      this.deserializer = deserializer;
-      this.serializedData = serializedData;
-    }
-
-    /*
-     * Heapsize overhead of this is the default object overhead, the heapsize of
-     * the serialized object, and the cost of a reference to the bytebuffer,
-     * which is already accounted for in SingleSizeCache
-     */
-    @Override
-    public long heapSize() {
-      return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE * 3
-          + ClassSize.ATOMIC_LONG);
-    }
-  }
-
-  @Override
-  public Iterator<CachedBlock> iterator() {
-    return null;
-  }
-
-  @Override
-  public BlockCache[] getBlockCaches() {
-    return null;
-  }
-}
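
The bookkeeping pattern at the heart of the deleted class is worth
restating: Guava's CacheBuilder bounds the map and fires a RemovalListener
on eviction, and that callback is the hook that returns an evicted entry's
slab block to the free list. A minimal sketch using the same Guava calls
(the free-list type and names are illustrative):

  import java.nio.ByteBuffer;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.ConcurrentMap;
  import java.util.concurrent.LinkedBlockingQueue;

  import com.google.common.cache.CacheBuilder;
  import com.google.common.cache.RemovalListener;
  import com.google.common.cache.RemovalNotification;

  final class BoundedBufferMap {
    private final BlockingQueue<ByteBuffer> freeList =
        new LinkedBlockingQueue<ByteBuffer>();
    private final ConcurrentMap<String, ByteBuffer> map;

    BoundedBufferMap(int maxEntries) {
      RemovalListener<String, ByteBuffer> listener =
          new RemovalListener<String, ByteBuffer>() {
            @Override
            public void onRemoval(RemovalNotification<String, ByteBuffer> n) {
              if (n.wasEvicted()) {
                freeList.add(n.getValue()); // recycle the backing block
              }
            }
          };
      map = CacheBuilder.newBuilder()
          .maximumSize(maxEntries)
          .removalListener(listener)
          .<String, ByteBuffer>build()
          .asMap();
    }

    void put(String key, ByteBuffer block) { map.put(key, block); }
    ByteBuffer get(String key) { return map.get(key); }
  }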

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/Slab.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/Slab.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/Slab.java
deleted file mode 100644
index 637890f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/Slab.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import java.nio.ByteBuffer;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.DirectMemoryUtils;
-import com.google.common.base.Preconditions;
-
-/**
- * Slab is a class which is designed to allocate blocks of a certain size.
- * Constructor creates a number of DirectByteBuffers and slices them into the
- * requisite size, then puts them all in a buffer.
- *
- * @deprecated As of 1.0, replaced by {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}.
- */
-@InterfaceAudience.Private
-@Deprecated
-class Slab implements org.apache.hadoop.hbase.io.HeapSize {
-  static final Log LOG = LogFactory.getLog(Slab.class);
-
-  /** This is where our items, or blocks of the slab, are stored. */
-  private LinkedBlockingQueue<ByteBuffer> buffers;
-
-  /** This is where our Slabs are stored */
-  private ConcurrentLinkedQueue<ByteBuffer> slabs;
-
-  private final int blockSize;
-  private final int numBlocks;
-  private long heapSize;
-
-  Slab(int blockSize, int numBlocks) {
-    buffers = new LinkedBlockingQueue<ByteBuffer>();
-    slabs = new ConcurrentLinkedQueue<ByteBuffer>();
-
-    this.blockSize = blockSize;
-    this.numBlocks = numBlocks;
-
-    this.heapSize = ClassSize.estimateBase(this.getClass(), false);
-
-    int maxBlocksPerSlab = Integer.MAX_VALUE / blockSize;
-    int maxSlabSize = maxBlocksPerSlab * blockSize;
-
-    int numFullSlabs = numBlocks / maxBlocksPerSlab;
-    int partialSlabSize = (numBlocks % maxBlocksPerSlab) * blockSize;
-    for (int i = 0; i < numFullSlabs; i++) {
-      allocateAndSlice(maxSlabSize, blockSize);
-    }
-
-    if (partialSlabSize > 0) {
-      allocateAndSlice(partialSlabSize, blockSize);
-    }
-  }
-
-  private void allocateAndSlice(int size, int sliceSize) {
-    ByteBuffer newSlab = ByteBuffer.allocateDirect(size);
-    slabs.add(newSlab);
-    for (int j = 0; j < newSlab.capacity(); j += sliceSize) {
-      newSlab.limit(j + sliceSize).position(j);
-      ByteBuffer aSlice = newSlab.slice();
-      buffers.add(aSlice);
-      heapSize += ClassSize.estimateBase(aSlice.getClass(), false);
-    }
-  }
-
-  /*
-   * Shutdown deallocates the memory for all the DirectByteBuffers. Each
-   * DirectByteBuffer has a "cleaner" method, which is similar to a
-   * deconstructor in C++.
-   */
-  void shutdown() {
-    for (ByteBuffer aSlab : slabs) {
-      try {
-        DirectMemoryUtils.destroyDirectByteBuffer(aSlab);
-      } catch (Exception e) {
-        LOG.warn("Unable to deallocate direct memory during shutdown", e);
-      }
-    }
-  }
-
-  int getBlockSize() {
-    return this.blockSize;
-  }
-
-  int getBlockCapacity() {
-    return this.numBlocks;
-  }
-
-  int getBlocksRemaining() {
-    return this.buffers.size();
-  }
-
-  /*
-   * Throws an exception if you try to allocate a
-   * bigger size than the allocator can handle. Alloc will block until a buffer is available.
-   */
-  ByteBuffer alloc(int bufferSize) throws InterruptedException {
-    int newCapacity = Preconditions.checkPositionIndex(bufferSize, blockSize);
-
-    ByteBuffer returnedBuffer = buffers.take();
-
-    returnedBuffer.clear().limit(newCapacity);
-    return returnedBuffer;
-  }
-
-  void free(ByteBuffer toBeFreed) {
-    Preconditions.checkArgument(toBeFreed.capacity() == blockSize);
-    buffers.add(toBeFreed);
-  }
-
-  @Override
-  public long heapSize() {
-    return heapSize;
-  }
-}
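
The allocation trick is compact enough to restate on its own: allocate one
large direct buffer, carve it into fixed-size slices with
limit()/position()/slice(), and hand the slices out through a blocking free
list so that alloc() waits when the pool is empty. A pure-JDK sketch with
illustrative names (the deleted class additionally split very large
capacities across several buffers to stay under Integer.MAX_VALUE, omitted
here):

  import java.nio.ByteBuffer;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.LinkedBlockingQueue;

  final class MiniSlab {
    private final BlockingQueue<ByteBuffer> freeList =
        new LinkedBlockingQueue<ByteBuffer>();
    private final int blockSize;

    MiniSlab(int blockSize, int numBlocks) {
      this.blockSize = blockSize;
      ByteBuffer slab = ByteBuffer.allocateDirect(blockSize * numBlocks);
      for (int off = 0; off < slab.capacity(); off += blockSize) {
        slab.limit(off + blockSize).position(off);
        freeList.add(slab.slice()); // a view over [off, off + blockSize)
      }
    }

    ByteBuffer alloc(int size) throws InterruptedException {
      if (size > blockSize) {
        throw new IllegalArgumentException("request exceeds block size");
      }
      ByteBuffer b = freeList.take(); // blocks until a slice is freed
      b.clear().limit(size);
      return b;
    }

    void free(ByteBuffer b) {
      freeList.add(b); // return the slice to the pool
    }
  }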

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
deleted file mode 100644
index 88c13fc..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import java.math.BigDecimal;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-import org.apache.hadoop.hbase.io.hfile.BlockPriority;
-import org.apache.hadoop.hbase.io.hfile.BlockType;
-import org.apache.hadoop.hbase.io.hfile.CacheStats;
-import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.HasThread;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * SlabCache is composed of multiple SingleSizeCaches. It uses a TreeMap in
- * order to determine where a given element fits. Redirects gets and puts to the
- * correct SingleSizeCache.
- * 
- * <p>It is configured with a call to {@link #addSlab(int, int)}
- *
- * @deprecated As of 1.0, replaced by {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}.
- */
-@InterfaceAudience.Private
-@Deprecated
-public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
-  private final ConcurrentHashMap<BlockCacheKey, SingleSizeCache> backingStore;
-  private final TreeMap<Integer, SingleSizeCache> slabs;
-  static final Log LOG = LogFactory.getLog(SlabCache.class);
-  static final int STAT_THREAD_PERIOD_SECS = 60 * 5;
-
-  private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1,
-      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Slab Statistics #%d").build());
-
-  long size;
-  private final CacheStats stats;
-  final SlabStats requestStats;
-  final SlabStats successfullyCachedStats;
-  private final long avgBlockSize;
-  private static final long CACHE_FIXED_OVERHEAD = ClassSize.estimateBase(
-      SlabCache.class, false);
-
-  /**
-   * Key used reading from configuration list of the percentage of our total space we allocate to
-   * the slabs.  Defaults: "0.80", "0.20".
-   * @see #SLAB_CACHE_SIZES_KEY Must have corresponding number of elements.
-   */
-  static final String SLAB_CACHE_PROPORTIONS_KEY = "hbase.offheapcache.slab.proportions";
-
-  /**
-   * Configuration key for list of the blocksize of the slabs in bytes. (E.g. the slab holds
-   * blocks of this size).  Defaults: avgBlockSize * 11 / 10, avgBlockSize * 21 / 10
-   * @see #SLAB_CACHE_PROPORTIONS_KEY
-   */
-  static final String SLAB_CACHE_SIZES_KEY = "hbase.offheapcache.slab.sizes";
-
-  /**
-   * Default constructor, creates an empty SlabCache.
-   *
-   * @param size Total size allocated to the SlabCache. (Bytes)
-   * @param avgBlockSize Average size of a block being cached.
-   **/
-  public SlabCache(long size, long avgBlockSize) {
-    this.avgBlockSize = avgBlockSize;
-    this.size = size;
-    this.stats = new CacheStats();
-    this.requestStats = new SlabStats();
-    this.successfullyCachedStats = new SlabStats();
-
-    backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
-    slabs = new TreeMap<Integer, SingleSizeCache>();
-    this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
-        STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
-  }
-
-  public Map<Integer, SingleSizeCache> getSizer() {
-    return slabs;
-  }
-
-  /**
-   * A way of allocating the desired amount of Slabs of each particular size.
-   *
-   * This reads two lists from conf, hbase.offheap.slab.proportions and
-   * hbase.offheap.slab.sizes.
-   *
-   * The first list is the percentage of our total space we allocate to the
-   * slabs.
-   *
-   * The second list is blocksize of the slabs in bytes. (E.g. the slab holds
-   * blocks of this size).
-   *
-   * @param conf Configuration file.
-   */
-  public void addSlabByConf(Configuration conf) {
-    // Proportions we allocate to each slab of the total size.
-    String[] porportions = conf.getStrings(SLAB_CACHE_PROPORTIONS_KEY, "0.80", "0.20");
-    String[] sizes = conf.getStrings(SLAB_CACHE_SIZES_KEY,
-        Long.valueOf(avgBlockSize * 11 / 10).toString(),
-        Long.valueOf(avgBlockSize * 21 / 10).toString());
-
-    if (porportions.length != sizes.length) {
-      throw new IllegalArgumentException(
-          "SlabCache conf not "
-              + "initialized, error in configuration. hbase.offheap.slab.proportions specifies "
-              + porportions.length
-              + " slabs while hbase.offheap.slab.sizes specifies "
-              + sizes.length + " slabs "
-              + "offheapslabporportions and offheapslabsizes");
-    }
-    /*
-     * We use BigDecimals instead of floats because float rounding is annoying
-     */
-
-    BigDecimal[] parsedProportions = stringArrayToBigDecimalArray(porportions);
-    BigDecimal[] parsedSizes = stringArrayToBigDecimalArray(sizes);
-
-    BigDecimal sumProportions = new BigDecimal(0);
-    for (BigDecimal b : parsedProportions) {
-      /* Make sure all proportions are greater than 0 */
-      Preconditions
-          .checkArgument(b.compareTo(BigDecimal.ZERO) == 1,
-              "Proportions in hbase.offheap.slab.proportions must be greater than 0!");
-      sumProportions = sumProportions.add(b);
-    }
-
-    /* If the sum is greater than 1 */
-    Preconditions
-        .checkArgument(sumProportions.compareTo(BigDecimal.ONE) != 1,
-            "Sum of all proportions in hbase.offheap.slab.proportions must be less than 1");
-
-    /* If the sum of all proportions is less than 0.99 */
-    if (sumProportions.compareTo(new BigDecimal("0.99")) == -1) {
-      LOG.warn("Sum of hbase.offheap.slab.proportions is less than 0.99! Memory is being wasted");
-    }
-    for (int i = 0; i < parsedProportions.length; i++) {
-      int blockSize = parsedSizes[i].intValue();
-      int numBlocks = new BigDecimal(this.size).multiply(parsedProportions[i])
-          .divide(parsedSizes[i], BigDecimal.ROUND_DOWN).intValue();
-      addSlab(blockSize, numBlocks);
-    }
-  }
-
-  /**
-   * Gets the size of the slab cache a ByteBuffer of this size would be
-   * allocated to.
-   *
-   * @param size Size of the ByteBuffer we are checking.
-   *
-   * @return the Slab that the above bytebuffer would be allocated towards. If
-   *         object is too large, returns null.
-   */
-  Entry<Integer, SingleSizeCache> getHigherBlock(int size) {
-    return slabs.higherEntry(size - 1);
-  }
-
-  private BigDecimal[] stringArrayToBigDecimalArray(String[] parsee) {
-    BigDecimal[] parsed = new BigDecimal[parsee.length];
-    for (int i = 0; i < parsee.length; i++) {
-      parsed[i] = new BigDecimal(parsee[i].trim());
-    }
-    return parsed;
-  }
-
-  private void addSlab(int blockSize, int numBlocks) {
-    LOG.info("Creating slab of blockSize " + blockSize + " with " + numBlocks
-        + " blocks, " + StringUtils.byteDesc(blockSize * (long) numBlocks) + "bytes.");
-    slabs.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
-  }
-
-  /**
-   * Cache the block with the specified key and buffer. First finds what size
-   * SingleSlabCache it should fit in. If the block doesn't fit in any, it will
-   * return without doing anything.
-   * <p>
-   * It is assumed this will NEVER be called on an already cached block. If that
-   * is done, it is assumed that you are reinserting the same exact block due to
-   * a race condition, and will throw a runtime exception.
-   *
-   * @param cacheKey block cache key
-   * @param cachedItem block buffer
-   */
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem) {
-    Entry<Integer, SingleSizeCache> scacheEntry = getHigherBlock(cachedItem
-        .getSerializedLength());
-
-    this.requestStats.addin(cachedItem.getSerializedLength());
-
-    if (scacheEntry == null) {
-      return; // we can't cache, something too big.
-    }
-
-    this.successfullyCachedStats.addin(cachedItem.getSerializedLength());
-    SingleSizeCache scache = scacheEntry.getValue();
-
-    /*
-     * This will throw a runtime exception if we try to cache the same value
-     * twice
-     */
-    scache.cacheBlock(cacheKey, cachedItem);
-  } 
-
-  /**
-   * We don't care about whether its in memory or not, so we just pass the call
-   * through.
-   */
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
-      final boolean cacheDataInL1) {
-    cacheBlock(cacheKey, buf);
-  }
-
-  public CacheStats getStats() {
-    return this.stats;
-  }
-
-  /**
-   * Get the buffer of the block with the specified name.
-   *
-   * @return buffer of specified block name, or null if not in cache
-   */
-  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
-      boolean updateCacheMetrics) {
-    SingleSizeCache cachedBlock = backingStore.get(key);
-    if (cachedBlock == null) {
-      if (!repeat) stats.miss(caching);
-      return null;
-    }
-
-    Cacheable contentBlock = cachedBlock.getBlock(key, caching, false, updateCacheMetrics);
-
-    if (contentBlock != null) {
-      if (updateCacheMetrics) stats.hit(caching);
-    } else if (!repeat) {
-      if (updateCacheMetrics) stats.miss(caching);
-    }
-    return contentBlock;
-  }
-
-  /**
-   * Evicts a block from the cache. This is public, and thus contributes to the
-   * the evict counter.
-   */
-  public boolean evictBlock(BlockCacheKey cacheKey) {
-    SingleSizeCache cacheEntry = backingStore.get(cacheKey);
-    if (cacheEntry == null) {
-      return false;
-    } else {
-      cacheEntry.evictBlock(cacheKey);
-      return true;
-    }
-  }
-
-  @Override
-  public void onEviction(BlockCacheKey key, SingleSizeCache notifier) {
-    stats.evicted();
-    backingStore.remove(key);
-  }
-  
-  @Override
-  public void onInsertion(BlockCacheKey key, SingleSizeCache notifier) {
-    backingStore.put(key, notifier);
-  }
-
-  /**
-   * Sends a shutdown to all SingleSizeCache's contained by this cache.
-   *
-   * Also terminates the scheduleThreadPool.
-   */
-  public void shutdown() {
-    for (SingleSizeCache s : slabs.values()) {
-      s.shutdown();
-    }
-    this.scheduleThreadPool.shutdown();
-  }
-
-  public long heapSize() {
-    long childCacheSize = 0;
-    for (SingleSizeCache s : slabs.values()) {
-      childCacheSize += s.heapSize();
-    }
-    return SlabCache.CACHE_FIXED_OVERHEAD + childCacheSize;
-  }
-
-  public long size() {
-    return this.size;
-  }
-
-  public long getFreeSize() {
-    long childFreeSize = 0;
-    for (SingleSizeCache s : slabs.values()) {
-      childFreeSize += s.getFreeSize();
-    }
-    return childFreeSize;
-  }
-
-  @Override
-  public long getBlockCount() {
-    long count = 0;
-    for (SingleSizeCache cache : slabs.values()) {
-      count += cache.getBlockCount();
-    }
-    return count;
-  }
-
-  public long getCurrentSize() {
-    return size;
-  }
-
-  public long getEvictedCount() {
-    return stats.getEvictedCount();
-  }
-
-  /*
-   * Statistics thread. Periodically prints the cache statistics to the log.
-   * TODO: Fix.  Just emit to metrics.  Don't run a thread just to do a log.
-   */
-  static class StatisticsThread extends HasThread {
-    SlabCache ourcache;
-
-    public StatisticsThread(SlabCache slabCache) {
-      super("SlabCache.StatisticsThread");
-      setDaemon(true);
-      this.ourcache = slabCache;
-    }
-
-    @Override
-    public void run() {
-      for (SingleSizeCache s : ourcache.slabs.values()) {
-        s.logStats();
-      }
-
-      SlabCache.LOG.info("Current heap size is: "
-          + StringUtils.humanReadableInt(ourcache.heapSize()));
-
-      LOG.info("Request Stats");
-      ourcache.requestStats.logStats();
-      LOG.info("Successfully Cached Stats");
-      ourcache.successfullyCachedStats.logStats();
-    }
-
-  }
-
-  /**
-   * Just like CacheStats, but more Slab specific. Finely grained profiling of
-   * sizes we store using logs.
-   *
-   */
-  static class SlabStats {
-    // the maximum size somebody will ever try to cache, then we multiply by
-    // 10
-    // so we have finer grained stats.
-    static final int MULTIPLIER = 10;
-    final int NUMDIVISIONS = (int) (Math.log(Integer.MAX_VALUE) * MULTIPLIER);
-    private final AtomicLong[] counts = new AtomicLong[NUMDIVISIONS];
-
-    public SlabStats() {
-      for (int i = 0; i < NUMDIVISIONS; i++) {
-        counts[i] = new AtomicLong();
-      }
-    }
-
-    public void addin(int size) {
-      int index = (int) (Math.log(size) * MULTIPLIER);
-      counts[index].incrementAndGet();
-    }
-
-    public AtomicLong[] getUsage() {
-      return counts;
-    }
-
-    double getUpperBound(int index) {
-      return Math.pow(Math.E, ((index + 0.5) / MULTIPLIER));
-    }
-
-    double getLowerBound(int index) {
-      return Math.pow(Math.E, ((index - 0.5) / MULTIPLIER));
-    }
-
-    public void logStats() {
-      AtomicLong[] fineGrainedStats = getUsage();
-      for (int i = 0; i < fineGrainedStats.length; i++) {
-
-        if (fineGrainedStats[i].get() > 0) {
-          SlabCache.LOG.info("From  "
-              + StringUtils.humanReadableInt((long) getLowerBound(i)) + "- "
-              + StringUtils.humanReadableInt((long) getUpperBound(i)) + ": "
-              + StringUtils.humanReadableInt(fineGrainedStats[i].get())
-              + " requests");
-
-        }
-      }
-    }
-  }
-
-  public int evictBlocksByHfileName(String hfileName) {
-    int numEvicted = 0;
-    for (BlockCacheKey key : backingStore.keySet()) {
-      if (key.getHfileName().equals(hfileName)) {
-        if (evictBlock(key))
-          ++numEvicted;
-      }
-    }
-    return numEvicted;
-  }
-
-  @Override
-  public Iterator<CachedBlock> iterator() {
-    // Don't bother with ramcache since stuff is in here only a little while.
-    final Iterator<Map.Entry<BlockCacheKey, SingleSizeCache>> i =
-        this.backingStore.entrySet().iterator();
-    return new Iterator<CachedBlock>() {
-      private final long now = System.nanoTime();
-
-      @Override
-      public boolean hasNext() {
-        return i.hasNext();
-      }
-
-      @Override
-      public CachedBlock next() {
-        final Map.Entry<BlockCacheKey, SingleSizeCache> e = i.next();
-        final Cacheable cacheable = e.getValue().getBlock(e.getKey(), false, false, false);
-        return new CachedBlock() {
-          @Override
-          public String toString() {
-            return BlockCacheUtil.toString(this, now);
-          }
-
-          @Override
-          public BlockPriority getBlockPriority() {
-            return null;
-          }
-
-          @Override
-          public BlockType getBlockType() {
-            return cacheable.getBlockType();
-          }
-
-          @Override
-          public long getOffset() {
-            return e.getKey().getOffset();
-          }
-
-          @Override
-          public long getSize() {
-            return cacheable == null? 0: cacheable.getSerializedLength();
-          }
-
-          @Override
-          public long getCachedTime() {
-            return -1;
-          }
-
-          @Override
-          public String getFilename() {
-            return e.getKey().getHfileName();
-          }
-
-          @Override
-          public int compareTo(CachedBlock other) {
-            // Long.compare avoids int overflow on large offset differences.
-            return Long.compare(this.getOffset(), other.getOffset());
-          }
-        };
-      }
-
-      @Override
-      public void remove() {
-        throw new UnsupportedOperationException();
-      }
-    };
-  }
-
-  @Override
-  public BlockCache[] getBlockCaches() {
-    return null;
-  }
-}
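
For reference, the SlabStats being removed above bucketed request sizes on a
log scale: MULTIPLIER buckets per factor of e, so bucket i covers sizes in
[e^(i/10), e^((i+1)/10)). A minimal standalone sketch of the same scheme
(class and method names are illustrative, not HBase code; the removed class
reported bounds offset by half a bucket, while this sketch uses exact bucket
edges, and the clamping of degenerate sizes is an added assumption):

    import java.util.concurrent.atomic.AtomicLong;

    /** Log-scale histogram of request sizes, in the spirit of SlabStats. */
    class LogScaleSizeStats {
      static final int MULTIPLIER = 10; // buckets per factor of e
      static final int NUM_DIVISIONS =
          (int) (Math.log(Integer.MAX_VALUE) * MULTIPLIER); // ~214 buckets
      private final AtomicLong[] counts = new AtomicLong[NUM_DIVISIONS];

      LogScaleSizeStats() {
        for (int i = 0; i < NUM_DIVISIONS; i++) {
          counts[i] = new AtomicLong();
        }
      }

      void add(int size) {
        // Sizes in [e^(i/10), e^((i+1)/10)) land in bucket i; clamp the
        // degenerate extremes so the index stays within the array.
        int idx = size <= 1 ? 0
            : Math.min(NUM_DIVISIONS - 1, (int) (Math.log(size) * MULTIPLIER));
        counts[idx].incrementAndGet();
      }

      double lowerBound(int i) { return Math.exp((double) i / MULTIPLIER); }
      double upperBound(int i) { return Math.exp((double) (i + 1) / MULTIPLIER); }
    }

With these edges, upperBound(i) == lowerBound(i + 1), which is exactly the
non-overlap property that TestSlabCache.testStatsArithmetic (later in this
diff) checks.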

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabItemActionWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabItemActionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabItemActionWatcher.java
deleted file mode 100644
index 93b35db..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabItemActionWatcher.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-
-/**
- * Interface for objects that want to know when actions occur in a SingleSizeCache.
- *
- * @deprecated As of 1.0, replaced by {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}.
- */
-@InterfaceAudience.Private
-@Deprecated
-interface SlabItemActionWatcher {
-
-  /**
-   * This is called as a callback when an item is removed from a SingleSizeCache.
-   *
-   * @param key the key of the item being evicted
-   * @param notifier the object notifying the SlabCache of the eviction.
-   */
-  void onEviction(BlockCacheKey key, SingleSizeCache notifier);
-  
-  /**
-   * This is called as a callback when an item is inserted into a SingleSizeCache.
-   *
-   * @param key the key of the item being added
-   * @param notifier the object notifying the SlabCache of the insertion.
-   */
-  void onInsertion(BlockCacheKey key, SingleSizeCache notifier);
-}
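
The watcher above is how SlabCache kept its key-to-SingleSizeCache index
consistent: each child cache reported inserts and evictions back to the
parent. A generic sketch of the pattern (type and class names are stand-ins,
not HBase classes):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Stand-in for the package-private callback interface removed above.
    interface ItemActionWatcher<K, C> {
      void onInsertion(K key, C notifier);
      void onEviction(K key, C notifier);
    }

    /** Parent cache tracking which child cache currently holds each key. */
    class ParentCache<K, C> implements ItemActionWatcher<K, C> {
      private final Map<K, C> backingStore = new ConcurrentHashMap<>();

      @Override
      public void onInsertion(K key, C child) {
        backingStore.put(key, child); // the child reports every insert
      }

      @Override
      public void onEviction(K key, C child) {
        backingStore.remove(key); // and every eviction, keeping the index fresh
      }
    }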

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 03bc4f0..facef82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -281,9 +281,7 @@ public class TableMapReduceUtil {
   public static void resetCacheConfig(Configuration conf) {
     conf.setFloat(
       HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
-    conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0f);
     conf.setFloat(CacheConfig.BUCKET_CACHE_SIZE_KEY, 0f);
-    conf.setFloat("hbase.offheapcache.percentage", 0f);
     conf.setFloat("hbase.bucketcache.size", 0f);
   }
 

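With the slab keys gone, resetCacheConfig only restores the on-heap block
cache fraction to its default and zeroes the bucket cache settings. A
hypothetical MapReduce driver would call it like this (a sketch; the class
name and elided job setup are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

    public class ScanJobDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // MR tasks reading HFiles directly don't need big block caches;
        // reset the cache sizing so task JVMs don't allocate them.
        TableMapReduceUtil.resetCacheConfig(conf);
        // ... initTableMapperJob(...) and job submission would follow.
      }
    }
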
http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index 38a72b4..fa1f10f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -70,23 +70,6 @@ public class TestBlockCacheReporting {
   }
 
   @Test
-  public void testSlabCacheConfig() throws JsonGenerationException, JsonMappingException, IOException {
-    this.conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0.1f);
-    CacheConfig cc = new CacheConfig(this.conf);
-    assertTrue(cc.getBlockCache() instanceof DoubleBlockCache);
-    logPerBlock(cc.getBlockCache());
-    final int count = 3;
-    addDataAndHits(cc.getBlockCache(), count);
-    // The below has no asserts.  It is just exercising toString and toJSON code.
-    LOG.info(cc.getBlockCache().getStats());
-    BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache());
-    LOG.info(cbsbf);
-    logPerFile(cbsbf);
-    bucketCacheReport(cc.getBlockCache());
-    LOG.info(BlockCacheUtil.toJSON(cbsbf));
-  }
-
-  @Test
   public void testBucketCache() throws JsonGenerationException, JsonMappingException, IOException {
     this.conf.set(CacheConfig.BUCKET_CACHE_IOENGINE_KEY, "offheap");
     this.conf.setInt(CacheConfig.BUCKET_CACHE_SIZE_KEY, 100);

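BucketCache is the replacement for the removed slab cache, and the surviving
testBucketCache shows that enabling it is purely a configuration matter. A
minimal sketch (the class name and printed output are illustrative; the
"offheap" engine and size of 100 mirror the test above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class BucketCacheSetup {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set(CacheConfig.BUCKET_CACHE_IOENGINE_KEY, "offheap");
        conf.setInt(CacheConfig.BUCKET_CACHE_SIZE_KEY, 100);
        // CacheConfig wires up the combined on-heap + bucket cache.
        CacheConfig cc = new CacheConfig(conf);
        System.out.println(cc.getBlockCache());
      }
    }
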
http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 0c37b3b..8f7ed2b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -198,15 +198,6 @@ public class TestCacheConfig {
   }
 
   @Test
-  public void testSlabCacheConfig() {
-    this.conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0.1f);
-    CacheConfig cc = new CacheConfig(this.conf);
-    basicBlockCacheOps(cc, true, true);
-    assertTrue(cc.getBlockCache() instanceof DoubleBlockCache);
-    // TODO Assert sizes allocated are right.
-  }
-
-  @Test
   public void testBucketCacheConfig() {
     this.conf.set(CacheConfig.BUCKET_CACHE_IOENGINE_KEY, "offheap");
     this.conf.setInt(CacheConfig.BUCKET_CACHE_SIZE_KEY, 100);

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSingleSizeCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSingleSizeCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSingleSizeCache.java
deleted file mode 100644
index 8a0dc16..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSingleSizeCache.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests SingleSizeCache.
- * <p>
- *
- * Tests ensure that evictions happen when they should and behave as
- * expected, and that cached blocks are accessible when they should be.
- */
-// Starts 100 threads, high variability of execution time => Medium
-@Category(MediumTests.class)
-public class TestSingleSizeCache {
-  SingleSizeCache cache;
-  final int CACHE_SIZE = 1000000;
-  final int NUM_BLOCKS = 100;
-  final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
-  final int NUM_THREADS = 100;
-  final int NUM_QUERIES = 10000;
-
-  @Before
-  public void setup() {
-    cache = new SingleSizeCache(BLOCK_SIZE, NUM_BLOCKS, null);
-  }
-
-  @After
-  public void tearDown() {
-    cache.shutdown();
-  }
-
-  @Test
-  public void testCacheSimple() throws Exception {
-    CacheTestUtils.testCacheSimple(cache, BLOCK_SIZE, NUM_QUERIES);
-  }
-
-  @Test
-  public void testCacheMultiThreaded() throws Exception {
-    CacheTestUtils.testCacheMultiThreaded(cache, BLOCK_SIZE,
-        NUM_THREADS, NUM_QUERIES, 0.80);
-  }
-
-  @Test
-  public void testCacheMultiThreadedSingleKey() throws Exception {
-    CacheTestUtils.hammerSingleKey(cache, BLOCK_SIZE, NUM_THREADS, NUM_QUERIES);
-  }
-
-  @Test
-  public void testCacheMultiThreadedEviction() throws Exception {
-    CacheTestUtils.hammerEviction(cache, BLOCK_SIZE, NUM_THREADS, NUM_QUERIES);
-  }
-
-  @Test
-  public void testHeapSizeChanges(){
-    CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE);
-  }
-
-
-}
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlab.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlab.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlab.java
deleted file mode 100644
index 71d708a..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlab.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import static org.junit.Assert.*;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.junit.*;
-import org.junit.experimental.categories.Category;
-
-/**Test cases for Slab.java*/
-@Category(SmallTests.class)
-public class TestSlab {
-  static final int BLOCKSIZE = 1000;
-  static final int NUMBLOCKS = 100;
-  Slab testSlab;
-  ByteBuffer[] buffers = new ByteBuffer[NUMBLOCKS];
-
-  @Before
-  public void setUp() {
-    testSlab = new Slab(BLOCKSIZE, NUMBLOCKS);
-  }
-
-  @After
-  public void tearDown() {
-    testSlab.shutdown();
-  }
-
-  @Test
-  public void testBasicFunctionality() throws InterruptedException {
-    for (int i = 0; i < NUMBLOCKS; i++) {
-      buffers[i] = testSlab.alloc(BLOCKSIZE);
-      assertEquals(BLOCKSIZE, buffers[i].limit());
-    }
-
-    // write a unique integer to each allocated buffer.
-    for (int i = 0; i < NUMBLOCKS; i++) {
-      buffers[i].putInt(i);
-    }
-
-    // make sure the bytebuffers remain unique (the slab allocator hasn't
-    // allocated the same chunk of memory twice) by reading each value back.
-    for (int i = 0; i < NUMBLOCKS; i++) {
-      assertEquals(i, buffers[i].getInt(0));
-    }
-
-    for (int i = 0; i < NUMBLOCKS; i++) {
-      testSlab.free(buffers[i]); // free all the buffers.
-    }
-
-    for (int i = 0; i < NUMBLOCKS; i++) {
-      buffers[i] = testSlab.alloc(BLOCKSIZE);
-      assertEquals(BLOCKSIZE, buffers[i].limit());
-    }
-  }
-
-
-}
-

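The semantics TestSlab exercised - carve one large direct buffer into
fixed-size views, hand them out, recycle them on free - can be sketched in a
few lines (illustrative only, not the removed Slab implementation):

    import java.nio.ByteBuffer;
    import java.util.concurrent.ConcurrentLinkedQueue;

    /** Fixed-size slab allocator backed by a single direct buffer. */
    class MiniSlab {
      private final ConcurrentLinkedQueue<ByteBuffer> freeList =
          new ConcurrentLinkedQueue<>();

      MiniSlab(int blockSize, int numBlocks) {
        ByteBuffer backing = ByteBuffer.allocateDirect(blockSize * numBlocks);
        for (int i = 0; i < numBlocks; i++) {
          backing.position(i * blockSize).limit((i + 1) * blockSize);
          freeList.add(backing.slice()); // independent view of one region
        }
      }

      ByteBuffer alloc() {
        ByteBuffer b = freeList.poll();
        if (b == null) throw new IllegalStateException("slab exhausted");
        b.clear(); // reset position/limit for the new owner
        return b;
      }

      void free(ByteBuffer b) {
        freeList.add(b); // caller must not touch b after freeing it
      }
    }
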
http://git-wip-us.apache.org/repos/asf/hbase/blob/20cac213/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlabCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlabCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlabCache.java
deleted file mode 100644
index 41ee705..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/slab/TestSlabCache.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.slab;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
-import org.apache.hadoop.hbase.io.hfile.slab.SlabCache.SlabStats;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Ignore;
-import org.junit.experimental.categories.Category;
-
-/**
- * Basic test of SlabCache. Puts and gets.
- * <p>
- *
- * Tests ensure that blocks read back from the cache are identical to the
- * ones that were cached, and that the cache never exceeds its capacity. Note
- * that it's fine if the cache evicts before it reaches max capacity - Guava's
- * MapMaker may choose to evict at any time.
- *
- */
-// Starts 50 threads, high variability of execution time => Medium
-@Category(MediumTests.class)
-public class TestSlabCache {
-  static final int CACHE_SIZE = 1000000;
-  static final int NUM_BLOCKS = 101;
-  static final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
-  static final int NUM_THREADS = 50;
-  static final int NUM_QUERIES = 10000;
-  SlabCache cache;
-
-  @Before
-  public void setup() {
-    cache = new SlabCache(CACHE_SIZE + BLOCK_SIZE * 2, BLOCK_SIZE);
-    cache.addSlabByConf(new Configuration());
-  }
-
-  @After
-  public void tearDown() {
-    cache.shutdown();
-  }
-
-  @Test
-  public void testElementPlacement() {
-    assertEquals(BLOCK_SIZE * 11 / 10,
-        cache.getHigherBlock(BLOCK_SIZE).getKey().intValue());
-    assertEquals(BLOCK_SIZE * 21 / 10,
-        cache.getHigherBlock(BLOCK_SIZE * 2).getKey().intValue());
-  }
-
-  @Test
-  public void testCacheSimple() throws Exception {
-    CacheTestUtils.testCacheSimple(cache, BLOCK_SIZE, NUM_QUERIES);
-  }
-
-  @Test
-  public void testCacheMultiThreaded() throws Exception {
-    CacheTestUtils.testCacheMultiThreaded(cache, BLOCK_SIZE, NUM_THREADS,
-        NUM_QUERIES, 0.80);
-  }
-
-  @Test
-  public void testCacheMultiThreadedSingleKey() throws Exception {
-    CacheTestUtils.hammerSingleKey(cache, BLOCK_SIZE, NUM_THREADS, NUM_QUERIES);
-  }
-
-  @Test
-  public void testCacheMultiThreadedEviction() throws Exception {
-    CacheTestUtils.hammerEviction(cache, BLOCK_SIZE, 10, NUM_QUERIES);
-  }
-
-  /** Checks that adjacent stat ranges do not overlap. */
-  @Test
-  public void testStatsArithmetic() {
-    SlabStats test = cache.requestStats;
-    for(int i = 0; i < test.NUMDIVISIONS; i++){
-      assertTrue("Upper for index " + i + " is " + test.getUpperBound(i) +
-          " lower " + test.getLowerBound(i + 1),
-          test.getUpperBound(i) <= test.getLowerBound(i + 1));
-    }
-  }
-
-  @Test
-  public void testHeapSizeChanges(){
-    CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE);
-  }
-
-}
-
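
A closing note on testElementPlacement above: blocks went to the smallest
slab that could hold them, and the assertions imply the two default slabs
accepted blocks up to 11/10 and 21/10 of the configured block size. The
arithmetic under the test's constants (a sketch that just prints the values
the test asserts):

    public class SlabPlacementMath {
      public static void main(String[] args) {
        final int cacheSize = 1000000;
        final int numBlocks = 101;
        final int blockSize = cacheSize / numBlocks; // 9900
        System.out.println(blockSize * 11 / 10); // 10890: ordinary blocks
        System.out.println(blockSize * 21 / 10); // 20790: oversized blocks
      }
    }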