Posted to commits@hbase.apache.org by op...@apache.org on 2019/05/31 07:15:22 UTC

[hbase] 12/17: HBASE-21921 Notify users if the ByteBufAllocator is always allocating ByteBuffers from heap, which means increasing GC pressure

This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e11dc0b2b53b59a07bd20e4e0985374b85132269
Author: huzheng <op...@gmail.com>
AuthorDate: Mon Apr 29 15:31:19 2019 +0800

    HBASE-21921 Notify users if the ByteBufAllocator is always allocating ByteBuffers from heap, which means increasing GC pressure
---
 .../apache/hadoop/hbase/io/ByteBuffAllocator.java  | 48 ++++++++++++--
 .../hadoop/hbase/io/TestByteBuffAllocator.java     | 75 ++++++++++++++--------
 .../hbase/io/TestByteBufferListOutputStream.java   |  4 +-
 .../regionserver/MetricsRegionServerSource.java    | 15 +++++
 .../regionserver/MetricsRegionServerWrapper.java   | 28 +++++---
 .../MetricsRegionServerSourceImpl.java             | 17 ++++-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon     |  3 +-
 .../tmpl/regionserver/ServerMetricsTmpl.jamon      | 28 ++++++++
 .../MetricsRegionServerWrapperImpl.java            | 33 +++++++++-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    | 12 ++--
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      |  2 +-
 .../MetricsRegionServerWrapperStub.java            | 25 ++++++++
 .../hbase/regionserver/TestRSStatusServlet.java    | 29 +++------
 13 files changed, 244 insertions(+), 75 deletions(-)
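
The gist of the change: ByteBuffAllocator now keeps two LongAdder counters (pool allocations vs. heap allocations), and exposes them, together with the total and free pooled-buffer counts, as RegionServer metrics and on the RS status page, so operators can notice when requests keep falling back to on-heap allocation. Below is a minimal sketch (not part of this commit) of how the new getters behave. It assumes the four-argument constructor used by TestByteBuffAllocator is reachable (it appears to be package-scoped, so the sketch sits in the same org.apache.hadoop.hbase.io package), and it assumes ByteBuff lives at org.apache.hadoop.hbase.nio.ByteBuff.

    package org.apache.hadoop.hbase.io;

    import org.apache.hadoop.hbase.nio.ByteBuff;

    public class ByteBuffAllocatorMetricsSketch {
      public static void main(String[] args) {
        int bufSize = 6 * 1024;
        // Reservoir enabled, 10 pooled direct buffers; requests smaller than
        // bufSize / 6 bytes are served from heap instead of the pool.
        ByteBuffAllocator alloc = new ByteBuffAllocator(true, 10, bufSize, bufSize / 6);

        ByteBuff pooled = alloc.allocateOneBuffer(); // counted as a pool allocation
        ByteBuff small = alloc.allocate(200);        // too small for the pool -> heap allocation

        System.out.println("pool allocations: " + alloc.getPoolAllocationNum()); // 1
        System.out.println("heap allocations: " + alloc.getHeapAllocationNum()); // 1
        System.out.println("free/total buffers: " + alloc.getFreeBufferCount()
            + "/" + alloc.getTotalBufferCount());

        small.release();
        pooled.release(); // returns the direct buffer to the pool
      }
    }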

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
index 51de22a..5939d4a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
@@ -24,6 +24,8 @@ import java.util.List;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.LongAdder;
+
 import sun.nio.ch.DirectBuffer;
 
 import org.apache.hadoop.conf.Configuration;
@@ -95,6 +97,13 @@ public class ByteBuffAllocator {
 
   private final Queue<ByteBuffer> buffers = new ConcurrentLinkedQueue<>();
 
+  // Metrics to track the pool allocation number and heap allocation number. If the heap
+  // allocation number keeps rising, then we may need to increase the max.buffer.count.
+  private final LongAdder poolAllocationNum = new LongAdder();
+  private final LongAdder heapAllocationNum = new LongAdder();
+  private long lastPoolAllocationNum = 0;
+  private long lastHeapAllocationNum = 0;
+
   /**
    * Initialize an {@link ByteBuffAllocator} which will try to allocate ByteBuffers from off-heap if
    * reservoir is enabled and the reservoir has enough buffers, otherwise the allocator will just
@@ -152,11 +161,35 @@ public class ByteBuffAllocator {
     return reservoirEnabled;
   }
 
+  public long getHeapAllocationNum() {
+    return heapAllocationNum.sum();
+  }
+
+  public long getPoolAllocationNum() {
+    return poolAllocationNum.sum();
+  }
+
   @VisibleForTesting
-  public int getQueueSize() {
+  public int getFreeBufferCount() {
     return this.buffers.size();
   }
 
+  public int getTotalBufferCount() {
+    return maxBufCount;
+  }
+
+  public double getHeapAllocationRatio() {
+    long heapAllocNum = heapAllocationNum.sum(), poolAllocNum = poolAllocationNum.sum();
+    double heapDelta = heapAllocNum - lastHeapAllocationNum;
+    double poolDelta = poolAllocNum - lastPoolAllocationNum;
+    lastHeapAllocationNum = heapAllocNum;
+    lastPoolAllocationNum = poolAllocNum;
+    if (Math.abs(heapDelta + poolDelta) < 1e-3) {
+      return 0.0;
+    }
+    return heapDelta / (heapDelta + poolDelta) * 100;
+  }
+
   /**
    * Allocate an buffer with buffer size from ByteBuffAllocator, Note to call the
    * {@link ByteBuff#release()} if no need any more, otherwise the memory leak happen in NIO
@@ -171,11 +204,12 @@ public class ByteBuffAllocator {
       }
     }
     // Allocated from heap, let the JVM free its memory.
-    return allocateOnHeap(this.bufSize);
+    return (SingleByteBuff) ByteBuff.wrap(allocateOnHeap(bufSize));
   }
 
-  private static SingleByteBuff allocateOnHeap(int size) {
-    return new SingleByteBuff(NONE, ByteBuffer.allocate(size));
+  private ByteBuffer allocateOnHeap(int size) {
+    heapAllocationNum.increment();
+    return ByteBuffer.allocate(size);
   }
 
   /**
@@ -190,7 +224,7 @@ public class ByteBuffAllocator {
     }
     // If disabled the reservoir, just allocate it from on-heap.
     if (!isReservoirEnabled() || size == 0) {
-      return allocateOnHeap(size);
+      return ByteBuff.wrap(allocateOnHeap(size));
     }
     int reminder = size % bufSize;
     int len = size / bufSize + (reminder > 0 ? 1 : 0);
@@ -210,7 +244,7 @@ public class ByteBuffAllocator {
     if (remain > 0) {
       // If the last ByteBuffer is too small or the reservoir can not provide more ByteBuffers, we
       // just allocate the ByteBuffer from on-heap.
-      bbs.add(ByteBuffer.allocate(remain));
+      bbs.add(allocateOnHeap(remain));
     }
     ByteBuff bb = ByteBuff.wrap(bbs, () -> {
       for (int i = 0; i < lenFromReservoir; i++) {
@@ -248,6 +282,7 @@ public class ByteBuffAllocator {
     if (bb != null) {
       // To reset the limit to capacity and position to 0, must clear here.
       bb.clear();
+      poolAllocationNum.increment();
       return bb;
     }
     while (true) {
@@ -264,6 +299,7 @@ public class ByteBuffAllocator {
       if (!this.usedBufCount.compareAndSet(c, c + 1)) {
         continue;
       }
+      poolAllocationNum.increment();
       return ByteBuffer.allocateDirect(bufSize);
     }
   }
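
One behavioural detail worth noting about the hunk above: getHeapAllocationRatio() snapshots lastHeapAllocationNum and lastPoolAllocationNum on every call, so it reports the heap share of allocations made since the previous call (i.e. per metrics reporting period), not a lifetime ratio. A small standalone sketch of the same delta arithmetic, with made-up counter values rather than anything measured:

    public class HeapRatioDeltaSketch {
      public static void main(String[] args) {
        long lastHeap = 0, lastPool = 0;

        // First period: 5 heap and 15 pool allocations so far.
        long heap = 5, pool = 15;
        double ratio1 = 100.0 * (heap - lastHeap) / ((heap - lastHeap) + (pool - lastPool));
        lastHeap = heap;
        lastPool = pool;

        // Second period: 5 more heap and 45 more pool allocations.
        heap = 10;
        pool = 60;
        double ratio2 = 100.0 * (heap - lastHeap) / ((heap - lastHeap) + (pool - lastPool));

        System.out.println(ratio1); // 25.0 for the first period
        System.out.println(ratio2); // 10.0 for the second period, not the lifetime 10/70 ~= 14.3
      }
    }
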
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocator.java
index 4375032..0d0da80 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocator.java
@@ -48,19 +48,26 @@ public class TestByteBuffAllocator {
     int bufSize = 6 * 1024;
     ByteBuffAllocator alloc = new ByteBuffAllocator(true, maxBuffersInPool, bufSize, bufSize / 6);
     ByteBuff buff = alloc.allocate(10 * bufSize);
+    assertEquals(10, alloc.getPoolAllocationNum());
+    assertEquals(0, alloc.getHeapAllocationNum());
     buff.release();
     // When the request size is less than 1/6th of the pool buffer size. We should use on demand
     // created on heap Buffer
     buff = alloc.allocate(200);
     assertTrue(buff.hasArray());
-    assertEquals(maxBuffersInPool, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
+    assertEquals(maxBuffersInPool, alloc.getTotalBufferCount());
+    assertEquals(10, alloc.getPoolAllocationNum());
+    assertEquals(1, alloc.getHeapAllocationNum());
     buff.release();
     // When the request size is > 1/6th of the pool buffer size.
     buff = alloc.allocate(1024);
     assertFalse(buff.hasArray());
-    assertEquals(maxBuffersInPool - 1, alloc.getQueueSize());
-    buff.release();// ByteBuffDeallocaor#free should put back the BB to pool.
-    assertEquals(maxBuffersInPool, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool - 1, alloc.getFreeBufferCount());
+    assertEquals(11, alloc.getPoolAllocationNum());
+    assertEquals(1, alloc.getHeapAllocationNum());
+    buff.release();// The ByteBuff recycler should put the BB back into the pool.
+    assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
     // Request size> pool buffer size
     buff = alloc.allocate(7 * 1024);
     assertFalse(buff.hasArray());
@@ -71,9 +78,11 @@ public class TestByteBuffAllocator {
     assertTrue(bbs[1].isDirect());
     assertEquals(6 * 1024, bbs[0].limit());
     assertEquals(1024, bbs[1].limit());
-    assertEquals(maxBuffersInPool - 2, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool - 2, alloc.getFreeBufferCount());
+    assertEquals(13, alloc.getPoolAllocationNum());
+    assertEquals(1, alloc.getHeapAllocationNum());
     buff.release();
-    assertEquals(maxBuffersInPool, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
 
     buff = alloc.allocate(6 * 1024 + 200);
     assertFalse(buff.hasArray());
@@ -84,11 +93,16 @@ public class TestByteBuffAllocator {
     assertFalse(bbs[1].isDirect());
     assertEquals(6 * 1024, bbs[0].limit());
     assertEquals(200, bbs[1].limit());
-    assertEquals(maxBuffersInPool - 1, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool - 1, alloc.getFreeBufferCount());
+    assertEquals(14, alloc.getPoolAllocationNum());
+    assertEquals(2, alloc.getHeapAllocationNum());
     buff.release();
-    assertEquals(maxBuffersInPool, alloc.getQueueSize());
+    assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
 
     alloc.allocate(bufSize * (maxBuffersInPool - 1));
+    assertEquals(23, alloc.getPoolAllocationNum());
+    assertEquals(2, alloc.getHeapAllocationNum());
+
     buff = alloc.allocate(20 * 1024);
     assertFalse(buff.hasArray());
     assertTrue(buff instanceof MultiByteBuff);
@@ -98,23 +112,29 @@ public class TestByteBuffAllocator {
     assertFalse(bbs[1].isDirect());
     assertEquals(6 * 1024, bbs[0].limit());
     assertEquals(14 * 1024, bbs[1].limit());
-    assertEquals(0, alloc.getQueueSize());
+    assertEquals(0, alloc.getFreeBufferCount());
+    assertEquals(24, alloc.getPoolAllocationNum());
+    assertEquals(3, alloc.getHeapAllocationNum());
+
     buff.release();
-    assertEquals(1, alloc.getQueueSize());
+    assertEquals(1, alloc.getFreeBufferCount());
     alloc.allocateOneBuffer();
+    assertEquals(25, alloc.getPoolAllocationNum());
+    assertEquals(3, alloc.getHeapAllocationNum());
 
     buff = alloc.allocate(7 * 1024);
     assertTrue(buff.hasArray());
     assertTrue(buff instanceof SingleByteBuff);
     assertEquals(7 * 1024, buff.nioByteBuffers()[0].limit());
+    assertEquals(25, alloc.getPoolAllocationNum());
+    assertEquals(4, alloc.getHeapAllocationNum());
     buff.release();
   }
 
   @Test
   public void testNegativeAllocatedSize() {
     int maxBuffersInPool = 10;
-    ByteBuffAllocator allocator =
-        new ByteBuffAllocator(true, maxBuffersInPool, 6 * 1024, 1024);
+    ByteBuffAllocator allocator = new ByteBuffAllocator(true, maxBuffersInPool, 6 * 1024, 1024);
     try {
       allocator.allocate(-1);
       fail("Should throw exception when size < 0");
@@ -122,6 +142,7 @@ public class TestByteBuffAllocator {
       // expected exception
     }
     ByteBuff bb = allocator.allocate(0);
+    assertEquals(1, allocator.getHeapAllocationNum());
     bb.release();
   }
 
@@ -169,7 +190,7 @@ public class TestByteBuffAllocator {
     dup2.release();
     assertEquals(0, buf2.refCnt());
     assertEquals(0, dup2.refCnt());
-    assertEquals(0, alloc.getQueueSize());
+    assertEquals(0, alloc.getFreeBufferCount());
     assertException(dup2::position);
     assertException(buf2::position);
 
@@ -178,7 +199,7 @@ public class TestByteBuffAllocator {
     dup1.release();
     assertEquals(0, buf1.refCnt());
     assertEquals(0, dup1.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
     assertException(dup1::position);
     assertException(buf1::position);
 
@@ -189,7 +210,7 @@ public class TestByteBuffAllocator {
     slice3.release();
     assertEquals(0, buf3.refCnt());
     assertEquals(0, slice3.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // slice the buf4, if the slice4 released, buf4 will also be released (MultipleByteBuffer)
     ByteBuff buf4 = alloc.allocate(bufSize * 2);
@@ -198,7 +219,7 @@ public class TestByteBuffAllocator {
     slice4.release();
     assertEquals(0, buf4.refCnt());
     assertEquals(0, slice4.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // Test multiple reference for the same ByteBuff (SingleByteBuff)
     ByteBuff buf5 = alloc.allocateOneBuffer();
@@ -206,7 +227,7 @@ public class TestByteBuffAllocator {
     slice5.release();
     assertEquals(0, buf5.refCnt());
     assertEquals(0, slice5.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
     assertException(slice5::position);
     assertException(buf5::position);
 
@@ -216,7 +237,7 @@ public class TestByteBuffAllocator {
     slice6.release();
     assertEquals(0, buf6.refCnt());
     assertEquals(0, slice6.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // Test retain the parent SingleByteBuff (duplicate)
     ByteBuff parent = alloc.allocateOneBuffer();
@@ -225,11 +246,11 @@ public class TestByteBuffAllocator {
     parent.release();
     assertEquals(1, child.refCnt());
     assertEquals(1, parent.refCnt());
-    assertEquals(1, alloc.getQueueSize());
+    assertEquals(1, alloc.getFreeBufferCount());
     parent.release();
     assertEquals(0, child.refCnt());
     assertEquals(0, parent.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // Test retain parent MultiByteBuff (duplicate)
     parent = alloc.allocate(bufSize << 1);
@@ -238,11 +259,11 @@ public class TestByteBuffAllocator {
     parent.release();
     assertEquals(1, child.refCnt());
     assertEquals(1, parent.refCnt());
-    assertEquals(0, alloc.getQueueSize());
+    assertEquals(0, alloc.getFreeBufferCount());
     parent.release();
     assertEquals(0, child.refCnt());
     assertEquals(0, parent.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // Test retain the parent SingleByteBuff (slice)
     parent = alloc.allocateOneBuffer();
@@ -251,11 +272,11 @@ public class TestByteBuffAllocator {
     parent.release();
     assertEquals(1, child.refCnt());
     assertEquals(1, parent.refCnt());
-    assertEquals(1, alloc.getQueueSize());
+    assertEquals(1, alloc.getFreeBufferCount());
     parent.release();
     assertEquals(0, child.refCnt());
     assertEquals(0, parent.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
 
     // Test retain parent MultiByteBuff (slice)
     parent = alloc.allocate(bufSize << 1);
@@ -264,11 +285,11 @@ public class TestByteBuffAllocator {
     parent.release();
     assertEquals(1, child.refCnt());
     assertEquals(1, parent.refCnt());
-    assertEquals(0, alloc.getQueueSize());
+    assertEquals(0, alloc.getFreeBufferCount());
     parent.release();
     assertEquals(0, child.refCnt());
     assertEquals(0, parent.refCnt());
-    assertEquals(2, alloc.getQueueSize());
+    assertEquals(2, alloc.getFreeBufferCount());
   }
 
   @Test
@@ -282,7 +303,7 @@ public class TestByteBuffAllocator {
     buf1.release();
     assertEquals(0, buf1.refCnt());
     assertEquals(0, dup1.refCnt());
-    assertEquals(1, alloc.getQueueSize());
+    assertEquals(1, alloc.getFreeBufferCount());
     assertException(buf1::position);
     assertException(dup1::position);
   }
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferListOutputStream.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferListOutputStream.java
index 3ac7a75..1943a43 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferListOutputStream.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferListOutputStream.java
@@ -55,7 +55,7 @@ public class TestByteBufferListOutputStream {
     bb1.release();
     bbos.writeInt(123);
     bbos.writeInt(124);
-    assertEquals(0, alloc.getQueueSize());
+    assertEquals(0, alloc.getFreeBufferCount());
     List<ByteBuffer> allBufs = bbos.getByteBuffers();
     assertEquals(4, allBufs.size());
     assertEquals(4, bbos.allBufs.size());
@@ -80,6 +80,6 @@ public class TestByteBufferListOutputStream {
     assertEquals(4, b4.remaining());
     assertEquals(124, b4.getInt());
     bbos.releaseResources();
-    assertEquals(3, alloc.getQueueSize());
+    assertEquals(3, alloc.getFreeBufferCount());
   }
 }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 8a7e647..68548c8 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -558,4 +558,19 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   String AVERAGE_REGION_SIZE = "averageRegionSize";
   String AVERAGE_REGION_SIZE_DESC =
       "Average region size over the RegionServer including memstore and storefile sizes.";
+
+  /** Metrics for {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} **/
+  String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_NUM = "ByteBuffAllocatorHeapAllocationNum";
+  String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_NUM_DESC =
+      "Number of heap allocation from ByteBuffAllocator";
+  String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_NUM = "ByteBuffAllocatorPoolAllocationNum";
+  String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_NUM_DESC =
+      "Number of pool allocation from ByteBuffAllocator";
+  String BYTE_BUFF_ALLOCATOR_HEAP_ALLOACTION_RATIO = "ByteBuffAllocatorHeapAllocationRatio";
+  String BYTE_BUFF_ALLOCATOR_HEAP_ALLOACTION_RATIO_DESC =
+      "Ratio of heap allocation from ByteBuffAllocator, means heapAllocation/totalAllocation";
+  String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT = "ByteBuffAllocatorTotalBufferCount";
+  String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC = "Total buffer count in ByteBuffAllocator";
+  String BYTE_BUFF_ALLOCATOR_FREE_BUFFER_COUNT = "ByteBuffAllocatorFreeBufferCount";
+  String BYTE_BUFF_ALLOCATOR_FREE_BUFFER_COUNT_DESC = "Free buffer count in ByteBuffAllocator";
 }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 03ebc4c..c196cda 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -230,7 +230,7 @@ public interface MetricsRegionServerWrapper {
    */
   int getFlushQueueSize();
 
-  public long getMemStoreLimit();
+  long getMemStoreLimit();
   /**
    * Get the size (in bytes) of the block cache that is free.
    */
@@ -295,42 +295,42 @@ public interface MetricsRegionServerWrapper {
   /**
    * Hit count of L1 cache.
    */
-  public long getL1CacheHitCount();
+  long getL1CacheHitCount();
 
   /**
    * Miss count of L1 cache.
    */
-  public long getL1CacheMissCount();
+  long getL1CacheMissCount();
 
   /**
    * Hit ratio of L1 cache.
    */
-  public double getL1CacheHitRatio();
+  double getL1CacheHitRatio();
 
   /**
    * Miss ratio of L1 cache.
    */
-  public double getL1CacheMissRatio();
+  double getL1CacheMissRatio();
 
   /**
    * Hit count of L2 cache.
    */
-  public long getL2CacheHitCount();
+  long getL2CacheHitCount();
 
   /**
    * Miss count of L2 cache.
    */
-  public long getL2CacheMissCount();
+  long getL2CacheMissCount();
 
   /**
    * Hit ratio of L2 cache.
    */
-  public double getL2CacheHitRatio();
+  double getL2CacheHitRatio();
 
   /**
    * Miss ratio of L2 cache.
    */
-  public double getL2CacheMissRatio();
+  double getL2CacheMissRatio();
 
   /**
    * Force a re-computation of the metrics.
@@ -523,4 +523,14 @@ public interface MetricsRegionServerWrapper {
   long getTrailerHitCount();
 
   long getTotalRowActionRequestCount();
+
+  long getByteBuffAllocatorHeapAllocationNum();
+
+  long getByteBuffAllocatorPoolAllocationNum();
+
+  double getByteBuffAllocatorHeapAllocRatio();
+
+  long getByteBuffAllocatorTotalBufferCount();
+
+  long getByteBuffAllocatorFreeBufferCount();
 }
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 58c42a5..e259022 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -553,7 +553,22 @@ public class MetricsRegionServerSourceImpl
             .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC),
                     rsWrap.getReadRequestsRatePerSecond())
             .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC),
-                    rsWrap.getWriteRequestsRatePerSecond());
+                    rsWrap.getWriteRequestsRatePerSecond())
+            .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_NUM,
+                BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_NUM_DESC),
+                rsWrap.getByteBuffAllocatorHeapAllocationNum())
+            .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_NUM,
+                BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_NUM_DESC),
+                rsWrap.getByteBuffAllocatorPoolAllocationNum())
+            .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOACTION_RATIO,
+                BYTE_BUFF_ALLOCATOR_HEAP_ALLOACTION_RATIO_DESC),
+                rsWrap.getByteBuffAllocatorHeapAllocRatio())
+            .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT,
+                BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC),
+                rsWrap.getByteBuffAllocatorTotalBufferCount())
+            .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_FREE_BUFFER_COUNT,
+                BYTE_BUFF_ALLOCATOR_FREE_BUFFER_COUNT_DESC),
+                rsWrap.getByteBuffAllocatorFreeBufferCount());
   }
 
   @Override
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index bee5da3..dfa915b 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -134,7 +134,8 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
     <section>
     <h2>Server Metrics</h2>
     <& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper();
-      mServerWrap = regionServer.getRpcServer().getMetrics().getHBaseServerWrapper(); &>
+      mServerWrap = regionServer.getRpcServer().getMetrics().getHBaseServerWrapper();
+      bbAllocator = regionServer.getRpcServer().getByteBuffAllocator(); &>
     </section>
 
     <section>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index adcfff1..5ace343 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -19,10 +19,12 @@ limitations under the License.
 <%args>
 MetricsRegionServerWrapper mWrap;
 MetricsHBaseServerWrapper mServerWrap;
+ByteBuffAllocator bbAllocator;
 </%args>
 <%import>
 java.util.*;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
+org.apache.hadoop.hbase.io.ByteBuffAllocator;
 org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper;
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
 org.apache.hadoop.hbase.util.Bytes;
@@ -45,6 +47,7 @@ org.apache.hadoop.hbase.io.util.MemorySizeUtil;
         <li class=""><a href="#tab_walStats" data-toggle="tab">WALs</a></li>
         <li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
         <li class=""><a href="#tab_queueStats" data-toggle="tab">Queues</a></li>
+        <li class=""><a href="#tab_byteBuffAllocatorStats" data-toggle="tab">ByteBuffAllocator Stats</a></li>
     </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
         <div class="tab-pane active" id="tab_baseStats">
@@ -65,6 +68,9 @@ org.apache.hadoop.hbase.io.util.MemorySizeUtil;
         <div class="tab-pane" id="tab_queueStats">
             <& queueStats; mWrap = mWrap; mServerWrap = mServerWrap; &>
         </div>
+        <div class="tab-pane" id="tab_byteBuffAllocatorStats">
+            <& byteBuffAllocatorStats; bbAllocator = bbAllocator; &>
+        </div>
     </div>
 </div>
 
@@ -225,3 +231,25 @@ MetricsHBaseServerWrapper mServerWrap;
 </tr>
 </table>
 </%def>
+
+<%def byteBuffAllocatorStats>
+<%args>
+ByteBuffAllocator bbAllocator;
+</%args>
+<table class="table table-striped">
+<tr>
+    <th>Number of Heap Allocation</th>
+    <th>Number of Pool Allocation</th>
+    <th>Heap Allocation Ratio</th>
+    <th>Total Buffer Count</th>
+    <th>Free Buffer Count</th>
+</tr>
+<tr>
+    <td><% bbAllocator.getHeapAllocationNum() %></td>
+    <td><% bbAllocator.getPoolAllocationNum() %></td>
+    <td><% bbAllocator.getHeapAllocationRatio() %>%</td>
+    <td><% bbAllocator.getTotalBufferCount() %></td>
+    <td><% bbAllocator.getFreeBufferCount() %></td>
+</tr>
+</table>
+</%def>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 33a6ee0..db72d11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
@@ -58,6 +59,7 @@ class MetricsRegionServerWrapperImpl
 
   private final HRegionServer regionServer;
   private final MetricsWALSource metricsWALSource;
+  private final ByteBuffAllocator allocator;
 
   private Optional<BlockCache> blockCache;
   private Optional<MobFileCache> mobFileCache;
@@ -129,15 +131,15 @@ class MetricsRegionServerWrapperImpl
     initBlockCache();
     initMobFileCache();
 
-    this.period =
-        regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
-          HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);
+    this.period = regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
+      HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);
 
     this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
     this.runnable = new RegionServerMetricsWrapperRunnable();
     this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
       TimeUnit.MILLISECONDS);
     this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
+    this.allocator = regionServer.getRpcServer().getByteBuffAllocator();
 
     try {
       this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
@@ -1006,4 +1008,29 @@ class MetricsRegionServerWrapperImpl
   public long getTrailerHitCount() {
     return this.cacheStats.map(CacheStats::getTrailerHitCount).orElse(0L);
   }
+
+  @Override
+  public long getByteBuffAllocatorHeapAllocationNum() {
+    return this.allocator.getHeapAllocationNum();
+  }
+
+  @Override
+  public long getByteBuffAllocatorPoolAllocationNum() {
+    return this.allocator.getPoolAllocationNum();
+  }
+
+  @Override
+  public double getByteBuffAllocatorHeapAllocRatio() {
+    return this.allocator.getHeapAllocationRatio();
+  }
+
+  @Override
+  public long getByteBuffAllocatorTotalBufferCount() {
+    return this.allocator.getTotalBufferCount();
+  }
+
+  @Override
+  public long getByteBuffAllocatorFreeBufferCount() {
+    return this.allocator.getFreeBufferCount();
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 101fd91..84e24e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -123,10 +123,10 @@ public class TestHFile  {
     List<ByteBuff> buffs = new ArrayList<>();
     for (int i = 0; i < bufCount; i++) {
       buffs.add(alloc.allocateOneBuffer());
-      Assert.assertEquals(alloc.getQueueSize(), 0);
+      Assert.assertEquals(alloc.getFreeBufferCount(), 0);
     }
     buffs.forEach(ByteBuff::release);
-    Assert.assertEquals(alloc.getQueueSize(), bufCount);
+    Assert.assertEquals(alloc.getFreeBufferCount(), bufCount);
   }
 
   @Test
@@ -143,7 +143,7 @@ public class TestHFile  {
       // fail test
       assertTrue(false);
     }
-    Assert.assertEquals(bufCount, alloc.getQueueSize());
+    Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
     alloc.clean();
   }
 
@@ -171,11 +171,11 @@ public class TestHFile  {
       Assert.assertTrue(cachedBlock instanceof HFileBlock);
       Assert.assertTrue(((HFileBlock) cachedBlock).isOnHeap());
       // Should never allocate off-heap block from allocator because ensure that it's LRU.
-      Assert.assertEquals(bufCount, alloc.getQueueSize());
+      Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
       block.release(); // return back the ByteBuffer back to allocator.
     }
     reader.close();
-    Assert.assertEquals(bufCount, alloc.getQueueSize());
+    Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
     alloc.clean();
     lru.shutdown();
   }
@@ -229,7 +229,7 @@ public class TestHFile  {
     }
     reader.close();
     combined.shutdown();
-    Assert.assertEquals(bufCount, alloc.getQueueSize());
+    Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
     alloc.clean();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index af42a24..5fdd7a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -156,7 +156,7 @@ public class TestHFileBlock {
 
   private void assertAllocator() {
     if (!useHeapAllocator) {
-      assertEquals(MAX_BUFFER_COUNT, alloc.getQueueSize());
+      assertEquals(MAX_BUFFER_COUNT, alloc.getFreeBufferCount());
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index b003b44..035167a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -116,6 +116,31 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
+  public long getByteBuffAllocatorHeapAllocationNum() {
+    return 0;
+  }
+
+  @Override
+  public long getByteBuffAllocatorPoolAllocationNum() {
+    return 0;
+  }
+
+  @Override
+  public double getByteBuffAllocatorHeapAllocRatio() {
+    return 0;
+  }
+
+  @Override
+  public long getByteBuffAllocatorTotalBufferCount() {
+    return 0;
+  }
+
+  @Override
+  public long getByteBuffAllocatorFreeBufferCount() {
+    return 0;
+  }
+
+  @Override
   public long getReadRequestsCount() {
     return 997;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
index fd3a56d..c3cf9d3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.ipc.MetricsHBaseServer;
 import org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperStub;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -50,12 +51,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 
 /**
@@ -92,12 +90,10 @@ public class TestRSStatusServlet {
     rs = Mockito.mock(HRegionServer.class);
     rpcServices = Mockito.mock(RSRpcServices.class);
     rpcServer = Mockito.mock(RpcServerInterface.class);
-    Mockito.doReturn(HBaseConfiguration.create())
-      .when(rs).getConfiguration();
+    Mockito.doReturn(HBaseConfiguration.create()).when(rs).getConfiguration();
     Mockito.doReturn(rpcServices).when(rs).getRSRpcServices();
     Mockito.doReturn(rpcServer).when(rs).getRpcServer();
-    Mockito.doReturn(fakeResponse).when(rpcServices).getServerInfo(
-      (RpcController)Mockito.any(), (GetServerInfoRequest)Mockito.any());
+    Mockito.doReturn(fakeResponse).when(rpcServices).getServerInfo(Mockito.any(), Mockito.any());
     // Fake ZKW
     ZKWatcher zkw = Mockito.mock(ZKWatcher.class);
     Mockito.doReturn("fakequorum").when(zkw).getQuorum();
@@ -119,6 +115,7 @@ public class TestRSStatusServlet {
     MetricsHBaseServer ms = Mockito.mock(MetricsHBaseServer.class);
     Mockito.doReturn(new MetricsHBaseServerWrapperStub()).when(ms).getHBaseServerWrapper();
     Mockito.doReturn(ms).when(rpcServer).getMetrics();
+    Mockito.doReturn(ByteBuffAllocator.HEAP).when(rpcServer).getByteBuffAllocator();
   }
 
   @Test
@@ -130,18 +127,12 @@ public class TestRSStatusServlet {
   public void testWithRegions() throws IOException, ServiceException {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
     List<RegionInfo> regions = Lists.newArrayList(
-        RegionInfoBuilder.newBuilder(htd.getTableName())
-            .setStartKey(Bytes.toBytes("a"))
-            .setEndKey(Bytes.toBytes("d"))
-            .build(),
-        RegionInfoBuilder.newBuilder(htd.getTableName())
-            .setStartKey(Bytes.toBytes("d"))
-            .setEndKey(Bytes.toBytes("z"))
-            .build()
-        );
-    Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse(
-      regions)).when(rpcServices).getOnlineRegion((RpcController)Mockito.any(),
-        (GetOnlineRegionRequest)Mockito.any());
+      RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(Bytes.toBytes("a"))
+          .setEndKey(Bytes.toBytes("d")).build(),
+      RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(Bytes.toBytes("d"))
+          .setEndKey(Bytes.toBytes("z")).build());
+    Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse(regions)).when(rpcServices)
+        .getOnlineRegion(Mockito.any(), Mockito.any());
     new RSStatusTmpl().render(new StringWriter(), rs);
   }
 }