You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by an...@apache.org on 2017/03/10 05:43:19 UTC

[2/2] hbase git commit: HBASE-17338 Treat Cell data size under global memstore heap size only when that Cell can not be copied to MSLAB.

HBASE-17338 Treat Cell data size under global memstore heap size only when that Cell can not be copied to MSLAB.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab597077
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab597077
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab597077

Branch: refs/heads/master
Commit: ab5970773ad187871270bb63f9a37cead4cf2f6a
Parents: c63a871
Author: anoopsamjohn <an...@gmail.com>
Authored: Fri Mar 10 11:12:59 2017 +0530
Committer: anoopsamjohn <an...@gmail.com>
Committed: Fri Mar 10 11:12:59 2017 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hbase/ByteBufferKeyValue.java |  7 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java  | 36 ---------
 .../org/apache/hadoop/hbase/ExtendedCell.java   |  5 --
 .../hadoop/hbase/IndividualBytesFieldCell.java  |  3 +-
 .../java/org/apache/hadoop/hbase/KeyValue.java  | 10 ---
 .../apache/hadoop/hbase/SizeCachedKeyValue.java |  5 --
 .../io/encoding/BufferedDataBlockEncoder.java   | 10 ---
 .../hbase/regionserver/AbstractMemStore.java    |  6 +-
 .../hbase/regionserver/CompactingMemStore.java  | 14 ++--
 .../hbase/regionserver/CompactionPipeline.java  | 30 ++++----
 .../regionserver/CompositeImmutableSegment.java | 56 ++++++++------
 .../hbase/regionserver/DefaultMemStore.java     | 14 ++--
 .../hadoop/hbase/regionserver/HRegion.java      | 10 +--
 .../hadoop/hbase/regionserver/HStore.java       |  8 +-
 .../hbase/regionserver/HeapMemoryManager.java   |  4 +-
 .../hbase/regionserver/ImmutableSegment.java    | 16 ++--
 .../hbase/regionserver/MemStoreFlusher.java     | 30 ++++----
 .../hbase/regionserver/MemStoreSnapshot.java    |  8 +-
 .../hadoop/hbase/regionserver/MemstoreSize.java | 55 +++++++-------
 .../hbase/regionserver/MutableSegment.java      |  6 +-
 .../regionserver/RegionServerAccounting.java    | 56 +++++---------
 .../hadoop/hbase/regionserver/Segment.java      | 50 ++++++------
 .../apache/hadoop/hbase/regionserver/Store.java |  9 +++
 .../regionserver/TestCompactingMemStore.java    | 67 ++++++++--------
 .../TestCompactingToCellArrayMapMemStore.java   | 80 +++++++-------------
 .../hbase/regionserver/TestDefaultMemStore.java |  4 +-
 .../regionserver/TestHeapMemoryManager.java     |  3 +-
 .../regionserver/TestPerColumnFamilyFlush.java  | 18 +++--
 .../hadoop/hbase/regionserver/TestStore.java    |  3 +-
 .../TestWalAndCompactingMemStoreFlush.java      | 45 ++++++-----
 30 files changed, 289 insertions(+), 379 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
index 66bc9b1..0b9caac 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
@@ -40,7 +40,7 @@ public class ByteBufferKeyValue extends ByteBufferCell implements ExtendedCell {
   protected final int length;
   private long seqId = 0;
 
-  private static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
+  public static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
       + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_LONG;
 
   public ByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId) {
@@ -302,11 +302,6 @@ public class ByteBufferKeyValue extends ByteBufferCell implements ExtendedCell {
   }
 
   @Override
-  public long heapOverhead() {
-    return FIXED_OVERHEAD;
-  }
-
-  @Override
   public Cell deepClone() {
     byte[] copy = new byte[this.length];
     ByteBufferUtils.copyFromBufferToArray(copy, this.buf, this.offset, 0, this.length);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 5930928..bb5197f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -622,15 +622,6 @@ public final class CellUtil {
     }
 
     @Override
-    public long heapOverhead() {
-      long overhead = ((ExtendedCell) this.cell).heapOverhead() + HEAP_SIZE_OVERHEAD;
-      if (this.tags != null) {
-        overhead += ClassSize.ARRAY;
-      }
-      return overhead;
-    }
-
-    @Override
     public Cell deepClone() {
       Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
       return new TagRewriteCell(clonedBaseCell, this.tags);
@@ -812,15 +803,6 @@ public final class CellUtil {
     }
 
     @Override
-    public long heapOverhead() {
-      long overhead = ((ExtendedCell) this.cell).heapOverhead() + HEAP_SIZE_OVERHEAD;
-      if (this.tags != null) {
-        overhead += ClassSize.ARRAY;
-      }
-      return overhead;
-    }
-
-    @Override
     public Cell deepClone() {
       Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
       if (clonedBaseCell instanceof ByteBufferCell) {
@@ -964,15 +946,6 @@ public final class CellUtil {
     }
 
     @Override
-    public long heapOverhead() {
-      long overhead = super.heapOverhead() + ClassSize.REFERENCE;
-      if (this.value != null) {
-        overhead += ClassSize.ARRAY;
-      }
-      return overhead;
-    }
-
-    @Override
     public Cell deepClone() {
       Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
       return new ValueAndTagRewriteCell(clonedBaseCell, this.value, this.tags);
@@ -1039,15 +1012,6 @@ public final class CellUtil {
     }
 
     @Override
-    public long heapOverhead() {
-      long overhead = super.heapOverhead() + ClassSize.REFERENCE;
-      if (this.value != null) {
-        overhead += ClassSize.ARRAY;
-      }
-      return overhead;
-    }
-
-    @Override
     public Cell deepClone() {
       Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
       if (clonedBaseCell instanceof ByteBufferCell) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index 23d2243..517873f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -69,11 +69,6 @@ public interface ExtendedCell extends Cell, SettableSequenceId, SettableTimestam
   void write(ByteBuffer buf, int offset);
 
   /**
-   * @return The heap size overhead associated with this Cell.
-   */
-  long heapOverhead();
-
-  /**
    * Does a deep copy of the contents to a new memory area and returns it as a new cell.
    * @return The deep cloned cell
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
index 3b37c5f..218a531 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
@@ -120,8 +120,7 @@ public class IndividualBytesFieldCell implements ExtendedCell {
                                getValueLength(), getTagsLength(), withTags);
   }
 
-  @Override
-  public long heapOverhead() {
+  private long heapOverhead() {
     return   FIXED_OVERHEAD
            + ClassSize.ARRAY                               // row      , can not be null
            + ((family    == null) ? 0 : ClassSize.ARRAY)   // family   , can be null

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 96fc30b..4fff830 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -2804,16 +2804,6 @@ public class KeyValue implements ExtendedCell {
       // of Cell to be returned back over the RPC
       throw new IllegalStateException("A reader should never return this type of a Cell");
     }
-
-    @Override
-    public long heapOverhead() {
-      return super.heapOverhead() + Bytes.SIZEOF_SHORT;
-    }
-  }
-
-  @Override
-  public long heapOverhead() {
-    return FIXED_OVERHEAD;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
index 60c4cbf..142415c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
@@ -62,9 +62,4 @@ public class SizeCachedKeyValue extends KeyValue {
   public long heapSize() {
     return super.heapSize() + FIXED_OVERHEAD;
   }
-
-  @Override
-  public long heapOverhead() {
-    return super.heapOverhead() + FIXED_OVERHEAD;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 22d7e3e..65266f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -470,11 +470,6 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
     }
 
     @Override
-    public long heapOverhead() {
-      return FIXED_OVERHEAD;
-    }
-
-    @Override
     public Cell deepClone() {
       // This is not used in actual flow. Throwing UnsupportedOperationException
       throw new UnsupportedOperationException();
@@ -720,11 +715,6 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
     }
 
     @Override
-    public long heapOverhead() {
-      return FIXED_OVERHEAD;
-    }
-
-    @Override
     public Cell deepClone() {
       // This is not used in actual flow. Throwing UnsupportedOperationException
       throw new UnsupportedOperationException();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 6c7886f..d44486c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -153,7 +153,7 @@ public abstract class AbstractMemStore implements MemStore {
 
   @Override
   public MemstoreSize getSnapshotSize() {
-    return new MemstoreSize(this.snapshot.keySize(), this.snapshot.heapOverhead());
+    return new MemstoreSize(this.snapshot.keySize(), this.snapshot.heapSize());
   }
 
   @Override
@@ -281,10 +281,10 @@ public abstract class AbstractMemStore implements MemStore {
   protected abstract long keySize();
 
   /**
-   * @return The total heap overhead of cells in this memstore. We will not consider cells in the
+   * @return The total heap size of cells in this memstore. We will not consider cells in the
    *         snapshot
    */
-  protected abstract long heapOverhead();
+  protected abstract long heapSize();
 
   protected CellComparator getComparator() {
     return comparator;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 511bd80..926b3f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -123,9 +123,9 @@ public class CompactingMemStore extends AbstractMemStore {
   @Override
   public MemstoreSize size() {
     MemstoreSize memstoreSize = new MemstoreSize();
-    memstoreSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+    memstoreSize.incMemstoreSize(this.active.keySize(), this.active.heapSize());
     for (Segment item : pipeline.getSegments()) {
-      memstoreSize.incMemstoreSize(item.keySize(), item.heapOverhead());
+      memstoreSize.incMemstoreSize(item.keySize(), item.heapSize());
     }
     return memstoreSize;
   }
@@ -196,13 +196,13 @@ public class CompactingMemStore extends AbstractMemStore {
       // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed
       if (compositeSnapshot) {
         snapshotSize = pipeline.getPipelineSize();
-        snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+        snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapSize());
       } else {
         snapshotSize = pipeline.getTailSize();
       }
     }
     return snapshotSize.getDataSize() > 0 ? snapshotSize
-        : new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
+        : new MemstoreSize(this.active.keySize(), this.active.heapSize());
   }
 
   @Override
@@ -216,11 +216,11 @@ public class CompactingMemStore extends AbstractMemStore {
   }
 
   @Override
-  protected long heapOverhead() {
+  protected long heapSize() {
     // Need to consider heapOverhead of all segments in pipeline and active
-    long h = this.active.heapOverhead();
+    long h = this.active.heapSize();
     for (Segment segment : this.pipeline.getSegments()) {
-      h += segment.heapOverhead();
+      h += segment.heapSize();
     }
     return h;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 9a844e6..e64c0fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -141,24 +141,24 @@ public class CompactionPipeline {
       long newDataSize = 0;
       if(segment != null) newDataSize = segment.keySize();
       long dataSizeDelta = suffixDataSize - newDataSize;
-      long suffixHeapOverhead = getSegmentsHeapOverhead(suffix);
-      long newHeapOverhead = 0;
-      if(segment != null) newHeapOverhead = segment.heapOverhead();
-      long heapOverheadDelta = suffixHeapOverhead - newHeapOverhead;
-      region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapOverheadDelta));
+      long suffixHeapSize = getSegmentsHeapSize(suffix);
+      long newHeapSize = 0;
+      if(segment != null) newHeapSize = segment.heapSize();
+      long heapSizeDelta = suffixHeapSize - newHeapSize;
+      region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapSizeDelta));
       if (LOG.isDebugEnabled()) {
         LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: "
-            + newDataSize + ". Suffix heap overhead: " + suffixHeapOverhead
-            + " new segment heap overhead: " + newHeapOverhead);
+            + newDataSize + ". Suffix heap size: " + suffixHeapSize
+            + " new segment heap size: " + newHeapSize);
       }
     }
     return true;
   }
 
-  private static long getSegmentsHeapOverhead(List<? extends Segment> list) {
+  private static long getSegmentsHeapSize(List<? extends Segment> list) {
     long res = 0;
     for (Segment segment : list) {
-      res += segment.heapOverhead();
+      res += segment.heapSize();
     }
     return res;
   }
@@ -234,20 +234,20 @@ public class CompactionPipeline {
 
   public MemstoreSize getTailSize() {
     LinkedList<? extends Segment> localCopy = readOnlyCopy;
-    if (localCopy.isEmpty()) return MemstoreSize.EMPTY_SIZE;
-    return new MemstoreSize(localCopy.peekLast().keySize(), localCopy.peekLast().heapOverhead());
+    if (localCopy.isEmpty()) return new MemstoreSize(true);
+    return new MemstoreSize(localCopy.peekLast().keySize(), localCopy.peekLast().heapSize());
   }
 
   public MemstoreSize getPipelineSize() {
     long keySize = 0;
-    long heapOverhead = 0;
+    long heapSize = 0;
     LinkedList<? extends Segment> localCopy = readOnlyCopy;
-    if (localCopy.isEmpty()) return MemstoreSize.EMPTY_SIZE;
+    if (localCopy.isEmpty()) return new MemstoreSize(true);
     for (Segment segment : localCopy) {
       keySize += segment.keySize();
-      heapOverhead += segment.heapOverhead();
+      heapSize += segment.heapSize();
     }
-    return new MemstoreSize(keySize, heapOverhead);
+    return new MemstoreSize(keySize, heapSize);
   }
 
   private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment,

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 73556bd..eeade4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 
@@ -43,7 +42,6 @@ import java.util.SortedSet;
 public class CompositeImmutableSegment extends ImmutableSegment {
 
   private final List<ImmutableSegment> segments;
-  private final CellComparator comparator;
   // CompositeImmutableSegment is used for snapshots and snapshot should
   // support getTimeRangeTracker() interface.
   // Thus we hold a constant TRT build in the construction time from TRT of the given segments.
@@ -53,7 +51,6 @@ public class CompositeImmutableSegment extends ImmutableSegment {
 
   public CompositeImmutableSegment(CellComparator comparator, List<ImmutableSegment> segments) {
     super(comparator);
-    this.comparator = comparator;
     this.segments = segments;
     this.timeRangeTracker = new TimeRangeTracker();
     for (ImmutableSegment s : segments) {
@@ -64,10 +61,12 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   }
 
   @VisibleForTesting
+  @Override
   public List<Segment> getAllSegments() {
     return new LinkedList<>(segments);
   }
 
+  @Override
   public int getNumOfSegments() {
     return segments.size();
   }
@@ -77,6 +76,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    * general segment scanner.
    * @return a special scanner for the MemStoreSnapshot object
    */
+  @Override
   public KeyValueScanner getSnapshotScanner() {
     return getScanner(Long.MAX_VALUE, Long.MAX_VALUE);
   }
@@ -84,6 +84,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * @return whether the segment has any cells
    */
+  @Override
   public boolean isEmpty() {
     for (ImmutableSegment s : segments) {
       if (!s.isEmpty()) return false;
@@ -94,6 +95,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * @return number of cells in segment
    */
+  @Override
   public int getCellsCount() {
     int result = 0;
     for (ImmutableSegment s : segments) {
@@ -105,6 +107,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * @return the first cell in the segment that has equal or greater key than the given cell
    */
+  @Override
   public Cell getFirstAfter(Cell cell) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
@@ -112,6 +115,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * Closing a segment before it is being discarded
    */
+  @Override
   public void close() {
     for (ImmutableSegment s : segments) {
       s.close();
@@ -123,14 +127,17 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    * otherwise the given cell is returned
    * @return either the given cell or its clone
    */
+  @Override
   public Cell maybeCloneWithAllocator(Cell cell) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public boolean shouldSeek(Scan scan, long oldestUnexpiredTS){
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public long getMinTimestamp(){
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
@@ -139,6 +146,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    * Creates the scanner for the given read point
    * @return a scanner for the given read point
    */
+  @Override
   public KeyValueScanner getScanner(long readPoint) {
     // Long.MAX_VALUE is DEFAULT_SCANNER_ORDER
     return getScanner(readPoint,Long.MAX_VALUE);
@@ -148,6 +156,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    * Creates the scanner for the given read point, and a specific order in a list
    * @return a scanner for the given read point
    */
+  @Override
   public KeyValueScanner getScanner(long readPoint, long order) {
     KeyValueScanner resultScanner;
     List<KeyValueScanner> list = new ArrayList<>(segments.size());
@@ -164,6 +173,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
     return resultScanner;
   }
 
+  @Override
   public boolean isTagsPresent() {
     for (ImmutableSegment s : segments) {
       if (s.isTagsPresent()) return true;
@@ -171,10 +181,12 @@ public class CompositeImmutableSegment extends ImmutableSegment {
     return false;
   }
 
+  @Override
   public void incScannerCount() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public void decScannerCount() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
@@ -184,7 +196,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    * immutable CellSet after its creation in immutable segment constructor
    * @return this object
    */
-
+  @Override
   protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
@@ -192,17 +204,19 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * @return Sum of all cell sizes.
    */
+  @Override
   public long keySize() {
     return this.keySize;
   }
 
   /**
-   * @return The heap overhead of this segment.
+   * @return The heap size of this segment.
    */
-  public long heapOverhead() {
+  @Override
+  public long heapSize() {
     long result = 0;
     for (ImmutableSegment s : segments) {
-      result += s.heapOverhead();
+      result += s.heapSize();
     }
     return result;
   }
@@ -210,39 +224,43 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * Updates the heap size counter of the segment by the given delta
    */
+  @Override
   protected void incSize(long delta, long heapOverhead) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
-  protected void incHeapOverheadSize(long delta) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
+  @Override
   public long getMinSequenceId() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public TimeRangeTracker getTimeRangeTracker() {
     return this.timeRangeTracker;
   }
 
   //*** Methods for SegmentsScanner
+  @Override
   public Cell last() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public Iterator<Cell> iterator() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public int compare(Cell left, Cell right) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   public int compareRows(Cell left, Cell right) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
@@ -250,36 +268,28 @@ public class CompositeImmutableSegment extends ImmutableSegment {
   /**
    * @return a set of all cells in the segment
    */
+  @Override
   protected CellSet getCellSet() {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
-  /**
-   * Returns the Cell comparator used by this segment
-   * @return the Cell comparator used by this segment
-   */
-  protected CellComparator getComparator() {
-    return comparator;
-  }
-
+  @Override
   protected void internalAdd(Cell cell, boolean mslabUsed, MemstoreSize memstoreSize) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
+  @Override
   protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
       MemstoreSize memstoreSize) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }
 
-  protected long heapOverheadChange(Cell cell, boolean succ) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
   /**
    * Returns a subset of the segment cell set, which starts with the given cell
    * @param firstCell a cell in the segment
    * @return a subset of the segment cell set, which starts with the given cell
    */
+  @Override
   protected SortedSet<Cell> tailSet(Cell firstCell) {
     throw new IllegalStateException("Not supported by CompositeImmutableScanner");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 4757e1d..b3e9c65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -111,7 +111,7 @@ public class DefaultMemStore extends AbstractMemStore {
   public MemstoreSize getFlushableSize() {
     MemstoreSize snapshotSize = getSnapshotSize();
     return snapshotSize.getDataSize() > 0 ? snapshotSize
-        : new MemstoreSize(keySize(), heapOverhead());
+        : new MemstoreSize(keySize(), heapSize());
   }
 
   @Override
@@ -120,8 +120,8 @@ public class DefaultMemStore extends AbstractMemStore {
   }
 
   @Override
-  protected long heapOverhead() {
-    return this.active.heapOverhead();
+  protected long heapSize() {
+    return this.active.heapSize();
   }
 
   @Override
@@ -160,7 +160,7 @@ public class DefaultMemStore extends AbstractMemStore {
 
   @Override
   public MemstoreSize size() {
-    return new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
+    return new MemstoreSize(this.active.keySize(), this.active.heapSize());
   }
 
   /**
@@ -205,12 +205,12 @@ public class DefaultMemStore extends AbstractMemStore {
       memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSize);
     }
     LOG.info("memstore1 estimated size="
-        + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+        + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
     for (int i = 0; i < count; i++) {
       memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSize);
     }
     LOG.info("memstore1 estimated size (2nd loading of same data)="
-        + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+        + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
     // Make a variably sized memstore.
     DefaultMemStore memstore2 = new DefaultMemStore();
     memstoreSize = new MemstoreSize();
@@ -218,7 +218,7 @@ public class DefaultMemStore extends AbstractMemStore {
       memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memstoreSize);
     }
     LOG.info("memstore2 estimated size="
-        + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+        + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
     final int seconds = 30;
     LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
     LOG.info("Exiting.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 29773c3..bf2d3cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1689,7 +1689,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
       this.closed.set(true);
       if (!canFlush) {
-        this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapOverhead()));
+        this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapSize()));
       } else if (memstoreDataSize.get() != 0) {
         LOG.error("Memstore size is " + memstoreDataSize.get());
       }
@@ -1713,12 +1713,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }
 
-  private long getMemstoreHeapOverhead() {
-    long overhead = 0;
+  private long getMemstoreHeapSize() {
+    long size = 0;
     for (Store s : this.stores.values()) {
-      overhead += s.getSizeOfMemStore().getHeapOverhead();
+      size += s.getSizeOfMemStore().getHeapSize();
     }
-    return overhead;
+    return size;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 8a66c3a..a988c5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -380,7 +380,7 @@ public class HStore implements Store {
   @Deprecated
   public long getFlushableSize() {
     MemstoreSize size = getSizeToFlush();
-    return size.getDataSize() + size.getHeapOverhead();
+    return size.getHeapSize();
   }
 
   @Override
@@ -392,7 +392,7 @@ public class HStore implements Store {
   @Deprecated
   public long getSnapshotSize() {
     MemstoreSize size = getSizeOfSnapshot();
-    return size.getDataSize() + size.getHeapOverhead();
+    return size.getHeapSize();
   }
 
   @Override
@@ -2067,7 +2067,7 @@ public class HStore implements Store {
   @Deprecated
   public long getMemStoreSize() {
     MemstoreSize size = getSizeOfMemStore();
-    return size.getDataSize() + size.getHeapOverhead();
+    return size.getHeapSize();
   }
 
   @Override
@@ -2298,7 +2298,7 @@ public class HStore implements Store {
   @Override
   public long heapSize() {
     MemstoreSize memstoreSize = this.memstore.size();
-    return DEEP_OVERHEAD + memstoreSize.getDataSize() + memstoreSize.getHeapOverhead();
+    return DEEP_OVERHEAD + memstoreSize.getHeapSize();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index e834306..93e502a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
 
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryType;
 import java.lang.management.MemoryUsage;
 import java.util.ArrayList;
 import java.util.List;
@@ -326,7 +324,7 @@ public class HeapMemoryManager {
       // TODO : add support for offheap metrics
       tunerContext.setCurBlockCacheUsed((float) blockCache.getCurrentSize() / maxHeapSize);
       metricsHeapMemoryManager.setCurBlockCacheSizeGauge(blockCache.getCurrentSize());
-      long globalMemstoreHeapSize = regionServerAccounting.getGlobalMemstoreSize();
+      long globalMemstoreHeapSize = regionServerAccounting.getGlobalMemstoreHeapSize();
       tunerContext.setCurMemStoreUsed((float) globalMemstoreHeapSize / maxHeapSize);
       metricsHeapMemoryManager.setCurMemStoreSizeGauge(globalMemstoreHeapSize);
       tunerContext.setCurBlockCacheSize(blockCachePercent);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index 501c1e9..c8d27b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -21,8 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.ExtendedCell;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.client.Scan;
@@ -119,14 +118,13 @@ public class ImmutableSegment extends Segment {
     super(new CellSet(comparator), // initiailize the CellSet with empty CellSet
         comparator, memStoreLAB);
     type = Type.SKIPLIST_MAP_BASED;
-    MemstoreSize memstoreSize = new MemstoreSize();
     while (iterator.hasNext()) {
       Cell c = iterator.next();
       // The scanner is doing all the elimination logic
       // now we just copy it to the new segment
       Cell newKV = maybeCloneWithAllocator(c);
       boolean usedMSLAB = (newKV != c);
-      internalAdd(newKV, usedMSLAB, memstoreSize);
+      internalAdd(newKV, usedMSLAB, null);
     }
     this.timeRange = this.timeRangeTracker == null ? null : this.timeRangeTracker.toTimeRange();
   }
@@ -226,17 +224,13 @@ public class ImmutableSegment extends Segment {
   }
 
   @Override
-  protected long heapOverheadChange(Cell cell, boolean succ) {
+  protected long heapSizeChange(Cell cell, boolean succ) {
     if (succ) {
       switch (this.type) {
       case SKIPLIST_MAP_BASED:
-        return super.heapOverheadChange(cell, succ);
+        return super.heapSizeChange(cell, succ);
       case ARRAY_MAP_BASED:
-        if (cell instanceof ExtendedCell) {
-          return ClassSize
-              .align(ClassSize.CELL_ARRAY_MAP_ENTRY + ((ExtendedCell) cell).heapOverhead());
-        }
-        return ClassSize.align(ClassSize.CELL_ARRAY_MAP_ENTRY + KeyValue.FIXED_OVERHEAD);
+        return ClassSize.align(ClassSize.CELL_ARRAY_MAP_ENTRY + CellUtil.estimatedHeapSizeOf(cell));
       }
     }
     return 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 174d3ca..7fddb36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -186,11 +186,11 @@ class MemStoreFlusher implements FlushRequester {
            (bestRegionReplica.getMemstoreSize()
                > secondaryMultiplier * regionToFlush.getMemstoreSize()))) {
         LOG.info("Refreshing storefiles of region " + bestRegionReplica
-            + " due to global heap pressure. Total memstore size="
+            + " due to global heap pressure. Total memstore datasize="
             + StringUtils
                 .humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
-            + " memstore heap overhead=" + StringUtils.humanReadableInt(
-                server.getRegionServerAccounting().getGlobalMemstoreHeapOverhead()));
+            + " memstore heap size=" + StringUtils.humanReadableInt(
+                server.getRegionServerAccounting().getGlobalMemstoreHeapSize()));
         flushedOne = refreshStoreFilesAndReclaimMemory(bestRegionReplica);
         if (!flushedOne) {
           LOG.info("Excluding secondary region " + bestRegionReplica +
@@ -587,14 +587,20 @@ class MemStoreFlusher implements FlushRequester {
             if (!blocked) {
               startTime = EnvironmentEdgeManager.currentTime();
               if (!server.getRegionServerAccounting().isOffheap()) {
-                logMsg("the global memstore size", "global memstore heap overhead");
+                logMsg("global memstore heapsize",
+                    server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
+                    server.getRegionServerAccounting().getGlobalMemstoreLimit());
               } else {
                 switch (flushType) {
                 case ABOVE_OFFHEAP_HIGHER_MARK:
-                  logMsg("the global offheap memstore size", "global memstore heap overhead");
+                  logMsg("the global offheap memstore datasize",
+                      server.getRegionServerAccounting().getGlobalMemstoreDataSize(),
+                      server.getRegionServerAccounting().getGlobalMemstoreLimit());
                   break;
                 case ABOVE_ONHEAP_HIGHER_MARK:
-                  logMsg("global memstore heap overhead", "");
+                  logMsg("global memstore heapsize",
+                      server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
+                      server.getRegionServerAccounting().getGlobalOnHeapMemstoreLimit());
                   break;
                 default:
                   break;
@@ -635,16 +641,10 @@ class MemStoreFlusher implements FlushRequester {
     scope.close();
   }
 
-  private void logMsg(String string1, String string2) {
+  private void logMsg(String string1, long val, long max) {
     LOG.info("Blocking updates on " + server.toString() + ": " + string1 + " "
-        + TraditionalBinaryPrefix
-            .long2String(server.getRegionServerAccounting().getGlobalMemstoreDataSize(), "", 1)
-        + " + " + string2 + " "
-        + TraditionalBinaryPrefix
-            .long2String(server.getRegionServerAccounting().getGlobalMemstoreHeapOverhead(), "", 1)
-        + " is >= than blocking " + TraditionalBinaryPrefix.long2String(
-          server.getRegionServerAccounting().getGlobalMemstoreLimit(), "", 1)
-        + " size");
+        + TraditionalBinaryPrefix.long2String(val, "", 1) + " is >= than blocking "
+        + TraditionalBinaryPrefix.long2String(max, "", 1) + " size");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
index 61e7876..3858b1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
@@ -29,7 +29,7 @@ public class MemStoreSnapshot {
   private final long id;
   private final int cellsCount;
   private final long dataSize;
-  private final long heapOverhead;
+  private final long heapSize;
   private final TimeRangeTracker timeRangeTracker;
   private final KeyValueScanner scanner;
   private final boolean tagsPresent;
@@ -38,7 +38,7 @@ public class MemStoreSnapshot {
     this.id = id;
     this.cellsCount = snapshot.getCellsCount();
     this.dataSize = snapshot.keySize();
-    this.heapOverhead = snapshot.heapOverhead();
+    this.heapSize = snapshot.heapSize();
     this.timeRangeTracker = snapshot.getTimeRangeTracker();
     this.scanner = snapshot.getSnapshotScanner();
     this.tagsPresent = snapshot.isTagsPresent();
@@ -65,8 +65,8 @@ public class MemStoreSnapshot {
     return dataSize;
   }
 
-  public long getHeapOverhead() {
-    return this.heapOverhead;
+  public long getHeapSize() {
+    return this.heapSize;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
index fa7c342..cb0f100 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
@@ -20,26 +20,29 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
- * Wraps the data size part and heap overhead of the memstore.
+ * Wraps the data size part and total heap space occupied by the memstore.
  */
 @InterfaceAudience.Private
 public class MemstoreSize {
 
+  // 'dataSize' tracks the Cell's data bytes size alone (key bytes, value bytes). A cell's data can
+  // be in an on-heap or off-heap area depending on the MSLAB and whether it is configured to use
+  // on-heap or off-heap LABs
   private long dataSize;
-  private long heapOverhead;
+  // 'heapSize' tracks all Cells' heap size occupancy. This will include Cell POJO heap overhead.
+  // When Cells are in the on-heap area, this will include the cells' data size as well.
+  private long heapSize;
   final private boolean isEmpty;
 
-  static final MemstoreSize EMPTY_SIZE = new MemstoreSize(true);
-
   public MemstoreSize() {
     dataSize = 0;
-    heapOverhead = 0;
+    heapSize = 0;
     isEmpty = false;
   }
 
   public MemstoreSize(boolean isEmpty) {
     dataSize = 0;
-    heapOverhead = 0;
+    heapSize = 0;
     this.isEmpty = isEmpty;
   }
 
@@ -47,40 +50,38 @@ public class MemstoreSize {
     return isEmpty;
   }
 
-  public MemstoreSize(long dataSize, long heapOverhead) {
+  public MemstoreSize(long dataSize, long heapSize) {
     this.dataSize = dataSize;
-    this.heapOverhead = heapOverhead;
+    this.heapSize = heapSize;
     this.isEmpty = false;
   }
 
-  public void incMemstoreSize(long dataSize, long heapOverhead) {
-    this.dataSize += dataSize;
-    this.heapOverhead += heapOverhead;
+  public void incMemstoreSize(long dataSizeDelta, long heapSizeDelta) {
+    this.dataSize += dataSizeDelta;
+    this.heapSize += heapSizeDelta;
   }
 
-  public void incMemstoreSize(MemstoreSize size) {
-    this.dataSize += size.dataSize;
-    this.heapOverhead += size.heapOverhead;
+  public void incMemstoreSize(MemstoreSize delta) {
+    this.dataSize += delta.dataSize;
+    this.heapSize += delta.heapSize;
   }
 
-  public void decMemstoreSize(long dataSize, long heapOverhead) {
-    this.dataSize -= dataSize;
-    this.heapOverhead -= heapOverhead;
+  public void decMemstoreSize(long dataSizeDelta, long heapSizeDelta) {
+    this.dataSize -= dataSizeDelta;
+    this.heapSize -= heapSizeDelta;
   }
 
-  public void decMemstoreSize(MemstoreSize size) {
-    this.dataSize -= size.dataSize;
-    this.heapOverhead -= size.heapOverhead;
+  public void decMemstoreSize(MemstoreSize delta) {
+    this.dataSize -= delta.dataSize;
+    this.heapSize -= delta.heapSize;
   }
 
   public long getDataSize() {
-
     return isEmpty ? 0 : dataSize;
   }
 
-  public long getHeapOverhead() {
-
-    return isEmpty ? 0 : heapOverhead;
+  public long getHeapSize() {
+    return isEmpty ? 0 : heapSize;
   }
 
   @Override
@@ -89,18 +90,18 @@ public class MemstoreSize {
       return false;
     }
     MemstoreSize other = (MemstoreSize) obj;
-    return getDataSize() == other.dataSize && getHeapOverhead() == other.heapOverhead;
+    return this.dataSize == other.dataSize && this.heapSize == other.heapSize;
   }
 
   @Override
   public int hashCode() {
     long h = 13 * this.dataSize;
-    h = h + 14 * this.heapOverhead;
+    h = h + 14 * this.heapSize;
     return (int) h;
   }
 
   @Override
   public String toString() {
-    return "dataSize=" + this.dataSize + " , heapOverhead=" + this.heapOverhead;
+    return "dataSize=" + this.dataSize + " , heapSize=" + this.heapSize;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
index 3dbd7ad..7361750 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
@@ -85,10 +85,10 @@ public class MutableSegment extends Segment {
             // decreased there as 0 only. Just keeping it as existing code now. We need to know the
             // removed cell is from MSLAB or not. Will do once HBASE-16438 is in
             int cellLen = getCellLength(cur);
-            long heapOverheadDelta = heapOverheadChange(cur, true);
-            this.incSize(-cellLen, -heapOverheadDelta);
+            long heapSize = heapSizeChange(cur, true);
+            this.incSize(-cellLen, -heapSize);
             if (memstoreSize != null) {
-              memstoreSize.decMemstoreSize(cellLen, heapOverheadDelta);
+              memstoreSize.decMemstoreSize(cellLen, heapSize);
             }
             it.remove();
           } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
index 91e28fd..a41a731 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
@@ -39,16 +39,16 @@ public class RegionServerAccounting {
 
   // memstore data size
   private final AtomicLong globalMemstoreDataSize = new AtomicLong(0);
-  // memstore heap over head size
-  private final AtomicLong globalMemstoreHeapOverhead = new AtomicLong(0);
+  // memstore heap size. When an off-heap MSLAB is in place, this will be only the heap overhead of
+  // the Cell POJOs and their entry overhead onto the memstore. When an on-heap MSLAB is used, this
+  // will include the heap overhead as well as the cell data size, since the cell data is then in
+  // the on-heap area.
+  private final AtomicLong globalMemstoreHeapSize = new AtomicLong(0);
 
   // Store the edits size during replaying WAL. Use this to roll back the
   // global memstore size once a region opening failed.
   private final ConcurrentMap<byte[], MemstoreSize> replayEditsPerRegion =
     new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
-  private final Configuration conf;
-
   private long globalMemStoreLimit;
   private final float globalMemStoreLimitLowMarkPercent;
   private long globalMemStoreLimitLowMark;
@@ -57,7 +57,6 @@ public class RegionServerAccounting {
   private long globalOnHeapMemstoreLimitLowMark;
 
   public RegionServerAccounting(Configuration conf) {
-    this.conf = conf;
     Pair<Long, MemoryType> globalMemstoreSizePair = MemorySizeUtil.getGlobalMemstoreSize(conf);
     this.globalMemStoreLimit = globalMemstoreSizePair.getFirst();
     this.memType = globalMemstoreSizePair.getSecond();
@@ -79,16 +78,16 @@ public class RegionServerAccounting {
         (long) (this.globalOnHeapMemstoreLimit * this.globalMemStoreLimitLowMarkPercent);
   }
 
-  public long getGlobalMemstoreLimit() {
+  long getGlobalMemstoreLimit() {
     return this.globalMemStoreLimit;
   }
 
-  public long getOnheapGlobalMemstoreLimit() {
+  long getGlobalOnHeapMemstoreLimit() {
     return this.globalOnHeapMemstoreLimit;
   }
 
   // Called by the tuners.
-  public void setGlobalMemstoreLimits(long newGlobalMemstoreLimit) {
+  void setGlobalMemstoreLimits(long newGlobalMemstoreLimit) {
     if (this.memType == MemoryType.HEAP) {
       this.globalMemStoreLimit = newGlobalMemstoreLimit;
       this.globalMemStoreLimitLowMark =
@@ -100,15 +99,15 @@ public class RegionServerAccounting {
     }
   }
 
-  public boolean isOffheap() {
+  boolean isOffheap() {
     return this.memType == MemoryType.NON_HEAP;
   }
 
-  public long getGlobalMemstoreLimitLowMark() {
+  long getGlobalMemstoreLimitLowMark() {
     return this.globalMemStoreLimitLowMark;
   }
 
-  public float getGlobalMemstoreLimitLowMarkPercent() {
+  float getGlobalMemstoreLimitLowMarkPercent() {
     return this.globalMemStoreLimitLowMarkPercent;
   }
 
@@ -118,24 +117,12 @@ public class RegionServerAccounting {
   public long getGlobalMemstoreDataSize() {
     return globalMemstoreDataSize.get();
   }
-  /**
-   * @return the global memstore heap overhead size in the RegionServer
-   */
-  public long getGlobalMemstoreHeapOverhead() {
-    return this.globalMemstoreHeapOverhead.get();
-  }
 
   /**
-   * @return the global memstore data size and heap overhead size for an onheap memstore
-   * whereas return the heap overhead size for an offheap memstore
+   * @return the global memstore heap size in the RegionServer
    */
-  public long getGlobalMemstoreSize() {
-    if (isOffheap()) {
-      // get only the heap overhead for offheap memstore
-      return getGlobalMemstoreHeapOverhead();
-    } else {
-      return getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead();
-    }
+  public long getGlobalMemstoreHeapSize() {
+    return this.globalMemstoreHeapSize.get();
   }
 
   /**
@@ -144,12 +131,12 @@ public class RegionServerAccounting {
    */
   public void incGlobalMemstoreSize(MemstoreSize memStoreSize) {
     globalMemstoreDataSize.addAndGet(memStoreSize.getDataSize());
-    globalMemstoreHeapOverhead.addAndGet(memStoreSize.getHeapOverhead());
+    globalMemstoreHeapSize.addAndGet(memStoreSize.getHeapSize());
   }
 
   public void decGlobalMemstoreSize(MemstoreSize memStoreSize) {
     globalMemstoreDataSize.addAndGet(-memStoreSize.getDataSize());
-    globalMemstoreHeapOverhead.addAndGet(-memStoreSize.getHeapOverhead());
+    globalMemstoreHeapSize.addAndGet(-memStoreSize.getHeapSize());
   }
 
   /**
@@ -160,7 +147,7 @@ public class RegionServerAccounting {
     // for onheap memstore we check if the global memstore size and the
     // global heap overhead is greater than the global memstore limit
     if (memType == MemoryType.HEAP) {
-      if (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead() >= globalMemStoreLimit) {
+      if (getGlobalMemstoreHeapSize() >= globalMemStoreLimit) {
         return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
       }
     } else {
@@ -175,7 +162,7 @@ public class RegionServerAccounting {
         // Indicates that global memstore size is above the configured
         // 'hbase.regionserver.offheap.global.memstore.size'
         return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
-      } else if (getGlobalMemstoreHeapOverhead() >= this.globalOnHeapMemstoreLimit) {
+      } else if (getGlobalMemstoreHeapSize() >= this.globalOnHeapMemstoreLimit) {
         // Indicates that the offheap memstore's heap overhead is greater than the
         // configured 'hbase.regionserver.global.memstore.size'.
         return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
@@ -191,7 +178,7 @@ public class RegionServerAccounting {
     // for onheap memstore we check if the global memstore size and the
     // global heap overhead is greater than the global memstore lower mark limit
     if (memType == MemoryType.HEAP) {
-      if (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead() >= globalMemStoreLimitLowMark) {
+      if (getGlobalMemstoreHeapSize() >= globalMemStoreLimitLowMark) {
         return FlushType.ABOVE_ONHEAP_LOWER_MARK;
       }
     } else {
@@ -199,7 +186,7 @@ public class RegionServerAccounting {
         // Indicates that the offheap memstore's data size is greater than the global memstore
         // lower limit
         return FlushType.ABOVE_OFFHEAP_LOWER_MARK;
-      } else if (getGlobalMemstoreHeapOverhead() >= globalOnHeapMemstoreLimitLowMark) {
+      } else if (getGlobalMemstoreHeapSize() >= globalOnHeapMemstoreLimitLowMark) {
         // Indicates that the offheap memstore's heap overhead is greater than the global memstore
         // onheap lower limit
         return FlushType.ABOVE_ONHEAP_LOWER_MARK;
@@ -215,11 +202,10 @@ public class RegionServerAccounting {
    */
   public double getFlushPressure() {
     if (memType == MemoryType.HEAP) {
-      return (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead()) * 1.0
-          / globalMemStoreLimitLowMark;
+      return (getGlobalMemstoreHeapSize()) * 1.0 / globalMemStoreLimitLowMark;
     } else {
       return Math.max(getGlobalMemstoreDataSize() * 1.0 / globalMemStoreLimitLowMark,
-        getGlobalMemstoreHeapOverhead() * 1.0 / globalOnHeapMemstoreLimitLowMark);
+          getGlobalMemstoreHeapSize() * 1.0 / globalOnHeapMemstoreLimitLowMark);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 11d51d8..452cca8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -62,7 +62,7 @@ public abstract class Segment {
   // Sum of sizes of all Cells added to this Segment. Cell's heapSize is considered. This is not
   // including the heap overhead of this class.
   protected final AtomicLong dataSize;
-  protected final AtomicLong heapOverhead;
+  protected final AtomicLong heapSize;
   protected final TimeRangeTracker timeRangeTracker;
   protected volatile boolean tagsPresent;
 
@@ -71,7 +71,7 @@ public abstract class Segment {
   protected Segment(CellComparator comparator) {
     this.comparator = comparator;
     this.dataSize = new AtomicLong(0);
-    this.heapOverhead = new AtomicLong(0);
+    this.heapSize = new AtomicLong(0);
     this.timeRangeTracker = new TimeRangeTracker();
   }
 
@@ -82,7 +82,7 @@ public abstract class Segment {
     this.minSequenceId = Long.MAX_VALUE;
     this.memStoreLAB = memStoreLAB;
     this.dataSize = new AtomicLong(0);
-    this.heapOverhead = new AtomicLong(0);
+    this.heapSize = new AtomicLong(0);
     this.tagsPresent = false;
     this.timeRangeTracker = new TimeRangeTracker();
   }
@@ -93,7 +93,7 @@ public abstract class Segment {
     this.minSequenceId = segment.getMinSequenceId();
     this.memStoreLAB = segment.getMemStoreLAB();
     this.dataSize = new AtomicLong(segment.keySize());
-    this.heapOverhead = new AtomicLong(segment.heapOverhead.get());
+    this.heapSize = new AtomicLong(segment.heapSize.get());
     this.tagsPresent = segment.isTagsPresent();
     this.timeRangeTracker = segment.getTimeRangeTracker();
   }
@@ -217,22 +217,19 @@ public abstract class Segment {
   }
 
   /**
-   * @return The heap overhead of this segment.
+   * @return The heap size of this segment.
    */
-  public long heapOverhead() {
-    return this.heapOverhead.get();
+  public long heapSize() {
+    return this.heapSize.get();
   }
 
   /**
-   * Updates the heap size counter of the segment by the given delta
+   * Updates the size counters of the segment by the given delta
    */
+  //TODO
   protected void incSize(long delta, long heapOverhead) {
     this.dataSize.addAndGet(delta);
-    this.heapOverhead.addAndGet(heapOverhead);
-  }
-
-  protected void incHeapOverheadSize(long delta) {
-    this.heapOverhead.addAndGet(delta);
+    this.heapSize.addAndGet(heapOverhead);
   }
 
   public long getMinSequenceId() {
@@ -293,10 +290,10 @@ public abstract class Segment {
     if (succ || mslabUsed) {
       cellSize = getCellLength(cellToAdd);
     }
-    long overhead = heapOverheadChange(cellToAdd, succ);
-    incSize(cellSize, overhead);
+    long heapSize = heapSizeChange(cellToAdd, succ);
+    incSize(cellSize, heapSize);
     if (memstoreSize != null) {
-      memstoreSize.incMemstoreSize(cellSize, overhead);
+      memstoreSize.incMemstoreSize(cellSize, heapSize);
     }
     getTimeRangeTracker().includeTimestamp(cellToAdd);
     minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId());
@@ -309,15 +306,14 @@ public abstract class Segment {
     }
   }
 
-  protected long heapOverheadChange(Cell cell, boolean succ) {
+  /**
+   * @return The increase in heap size because of this cell addition. This includes this cell POJO's
+   *         heap size itself and the additional overhead of adding it onto the CSLM.
+   */
+  protected long heapSizeChange(Cell cell, boolean succ) {
     if (succ) {
-      if (cell instanceof ExtendedCell) {
-        return ClassSize
-            .align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ((ExtendedCell) cell).heapOverhead());
-      }
-      // All cells in server side will be of type ExtendedCell. If not just go with estimation on
-      // the heap overhead considering it is KeyValue.
-      return ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD);
+      return ClassSize
+          .align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + CellUtil.estimatedHeapSizeOf(cell));
     }
     return 0;
   }
@@ -350,9 +346,9 @@ public abstract class Segment {
   public String toString() {
     String res = "Store segment of type "+this.getClass().getName()+"; ";
     res += "isEmpty "+(isEmpty()?"yes":"no")+"; ";
-    res += "cellCount "+getCellsCount()+"; ";
+    res += "cellsCount "+getCellsCount()+"; ";
     res += "cellsSize "+keySize()+"; ";
-    res += "heapOverhead "+heapOverhead()+"; ";
+    res += "totalHeapSize "+heapSize()+"; ";
     res += "Min ts "+getMinTimestamp()+"; ";
     return res;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index bb9e20a..76595f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -291,6 +291,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   /**
    * @return The size of this store's memstore, in bytes
    * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfMemStore()} instead.
+   * <p>
+   * Note: When using the off-heap MSLAB feature, this will not account for the cell data bytes
+   * size which resides in the off-heap MSLAB area.
    */
   @Deprecated
   long getMemStoreSize();
@@ -305,6 +308,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    * {@link #getMemStoreSize()} unless we are carrying snapshots and then it will be the size of
    * outstanding snapshots.
    * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeToFlush()} instead.
+   * <p>
+   * Note: When using the off-heap MSLAB feature, this will not account for the cell data bytes
+   * size which resides in the off-heap MSLAB area.
    */
   @Deprecated
   long getFlushableSize();
@@ -320,6 +326,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    * Returns the memstore snapshot size
    * @return size of the memstore snapshot
    * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfSnapshot()} instead.
+   * <p>
+   * Note: When using the off-heap MSLAB feature, this will not account for the cell data bytes
+   * size which resides in the off-heap MSLAB area.
    */
   @Deprecated
   long getSnapshotSize();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 63bbe65..09ddd6f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
@@ -563,9 +562,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
 
     // test 1 bucket
     int totalCellsLen = addRowsByKeys(memstore, keys1);
-    long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
+    long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     MemstoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -573,9 +574,9 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
     // totalCellsLen
     totalCellsLen = (totalCellsLen * 3) / 4;
-    totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
+    totalHeapSize = 3 * oneCellOnCAHeapSize;
     assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -599,10 +600,12 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     String[] keys2 = { "A", "B", "D" };
 
     int totalCellsLen1 = addRowsByKeys(memstore, keys1);
-    long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
+    long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
 
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     MemstoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -616,21 +619,21 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // totalCellsLen
     totalCellsLen1 = (totalCellsLen1 * 3) / 4;
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    totalHeapSize = 3 * oneCellOnCAHeapSize;
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     int totalCellsLen2 = addRowsByKeys(memstore, keys2);
-    totalHeapOverhead += 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    totalHeapSize += 3 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    totalHeapSize = 4 * oneCellOnCAHeapSize;
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -655,9 +658,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     String[] keys3 = { "D", "B", "B" };
 
     int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells.
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
     assertEquals(totalCellsLen1, region.getMemstoreSize());
-    long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     MemstoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -668,16 +673,14 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     totalCellsLen1 = (totalCellsLen1 * 3) / 4;
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
     // In memory flush to make a CellArrayMap instead of CSLM. See the overhead diff.
-    totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    totalHeapSize = 3 * oneCellOnCAHeapSize;
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     int totalCellsLen2 = addRowsByKeys(memstore, keys2);// Adding 3 more cells.
-    long totalHeapOverhead2 = 3
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
 
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).disableCompaction();
     size = memstore.getFlushableSize();
@@ -685,23 +688,18 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // No change in the cells data size. ie. memstore size. as there is no compaction.
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     int totalCellsLen3 = addRowsByKeys(memstore, keys3);// 3 more cells added
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
         regionServicesForStores.getMemstoreSize());
-    long totalHeapOverhead3 = 3
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
-    assertEquals(totalHeapOverhead + totalHeapOverhead2 + totalHeapOverhead3,
-        ((CompactingMemStore) memstore).heapOverhead());
+    long totalHeapSize3 = 3 * oneCellOnCSLMHeapSize;
+    assertEquals(totalHeapSize + totalHeapSize2 + totalHeapSize3,
+        ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore)memstore).enableCompaction();
     size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
-    while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // active flushed to pipeline and all 3 segments compacted. Will get rid of duplicated cells.
     // Out of total 10, only 4 cells are unique
@@ -710,8 +708,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
         regionServicesForStores.getMemstoreSize());
     // Only 4 unique cells left
-    assertEquals(4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY),
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(4 * oneCellOnCAHeapSize, ((CompactingMemStore) memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -721,15 +718,13 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     assertEquals(0, regionServicesForStores.getMemstoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
-
-    //assertTrue(tstStr, false);
   }
 
   private int addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
     byte[] fam = Bytes.toBytes("testfamily");
     byte[] qf = Bytes.toBytes("testqualifier");
     long size = hmc.getActive().keySize();
-    long heapOverhead = hmc.getActive().heapOverhead();
+    long heapOverhead = hmc.getActive().heapSize();
     int totalLen = 0;
     for (int i = 0; i < keys.length; i++) {
       long timestamp = System.currentTimeMillis();
@@ -742,7 +737,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp());
     }
     regionServicesForStores.addMemstoreSize(new MemstoreSize(hmc.getActive().keySize() - size,
-        hmc.getActive().heapOverhead() - heapOverhead));
+        hmc.getActive().heapSize() - heapOverhead));
     return totalLen;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
index 748576c..a9f8a97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.Threads;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -80,23 +79,22 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
 
     // test 1 bucket
     long totalCellsLen = addRowsByKeys(memstore, keys1);
-    long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
+    long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     assertEquals(4, memstore.getActive().getCellsCount());
     MemstoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
     // totalCellsLen
     totalCellsLen = (totalCellsLen * 3) / 4;
     assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
-    assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+    totalHeapSize = 3 * oneCellOnCAHeapSize;
+    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
     for ( Segment s : memstore.getSegments()) {
       counter += s.getCellsCount();
     }
@@ -117,16 +115,14 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     String[] keys2 = { "A", "B", "D" };
 
     long totalCellsLen1 = addRowsByKeys(memstore, keys1);
-    long totalHeapOverhead1 = 4
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
+    long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
     MemstoreSize size = memstore.getFlushableSize();
 
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(1000);
-    }
     int counter = 0;
     for ( Segment s : memstore.getSegments()) {
       counter += s.getCellsCount();
@@ -136,27 +132,17 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
     // totalCellsLen
     totalCellsLen1 = (totalCellsLen1 * 3) / 4;
-    totalHeapOverhead1 = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
+    totalHeapSize1 = 3 * oneCellOnCAHeapSize;
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen2 = addRowsByKeys(memstore, keys2);
-    long totalHeapOverhead2 = 3
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     size = memstore.getFlushableSize();
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    int i = 0;
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-      if (i > 10000000) {
-        ((CompactingMemStore) memstore).debug();
-        assertTrue("\n\n<<< Infinite loop! :( \n", false);
-      }
-    }
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     counter = 0;
     for ( Segment s : memstore.getSegments()) {
@@ -165,9 +151,8 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     assertEquals(4,counter);
     totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    totalHeapOverhead2 = 1 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
-    assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    totalHeapSize2 = 1 * oneCellOnCAHeapSize;
+    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -186,48 +171,42 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     String[] keys3 = { "D", "B", "B" };
 
     long totalCellsLen1 = addRowsByKeys(memstore, keys1);
-    long totalHeapOverhead1 = 4
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    int oneCellOnCSLMHeapSize = 120;
+    int oneCellOnCAHeapSize = 88;
+    long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen1, region.getMemstoreSize());
-    assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     MemstoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
 
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
     // totalCellsLen
     totalCellsLen1 = (totalCellsLen1 * 3) / 4;
-    totalHeapOverhead1 = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
+    totalHeapSize1 = 3 * oneCellOnCAHeapSize;
     assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen2 = addRowsByKeys(memstore, keys2);
-    long totalHeapOverhead2 = 3
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
 
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).disableCompaction();
     size = memstore.getFlushableSize();
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen3 = addRowsByKeys(memstore, keys3);
-    long totalHeapOverhead3 = 3
-        * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+    long totalHeapSize3 = 3 * oneCellOnCSLMHeapSize;
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
         regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapOverhead1 + totalHeapOverhead2 + totalHeapOverhead3,
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(totalHeapSize1 + totalHeapSize2 + totalHeapSize3,
+        ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).enableCompaction();
     size = memstore.getFlushableSize();
@@ -243,8 +222,7 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
         regionServicesForStores.getMemstoreSize());
     // Only 4 unique cells left
-    assertEquals(4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY),
-        ((CompactingMemStore) memstore).heapOverhead());
+    assertEquals(4 * oneCellOnCAHeapSize, ((CompactingMemStore) memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index e6d3147..e76da5a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -122,8 +122,6 @@ public class TestDefaultMemStore {
     this.memstore.add(kv, sizeChangeForSecondCell);
     // make sure memstore size increase won't double-count MSLAB chunk size
     assertEquals(Segment.getCellLength(kv), sizeChangeForFirstCell.getDataSize());
-    assertEquals(this.memstore.active.heapOverheadChange(kv, true),
-        sizeChangeForFirstCell.getHeapOverhead());
     Segment segment = this.memstore.getActive();
     MemStoreLAB msLab = segment.getMemStoreLAB();
     if (msLab != null) {
@@ -137,7 +135,7 @@ public class TestDefaultMemStore {
     } else {
       // make sure no memstore size change w/o MSLAB
       assertEquals(0, sizeChangeForSecondCell.getDataSize());
-      assertEquals(0, sizeChangeForSecondCell.getHeapOverhead());
+      assertEquals(0, sizeChangeForSecondCell.getHeapSize());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab597077/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 5a09274..e1a8d5f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryType;
 import java.util.Iterator;
 
 import org.apache.hadoop.conf.Configuration;
@@ -928,7 +927,7 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public long getGlobalMemstoreHeapOverhead() {
+    public long getGlobalMemstoreHeapSize() {
       return testMemstoreSize;
     }