You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by sy...@apache.org on 2016/02/13 21:10:25 UTC

[14/22] hbase git commit: HBASE-14919 Refactoring for in-memory flush and compaction

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
new file mode 100644
index 0000000..dfcec25
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
@@ -0,0 +1,348 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.htrace.Trace;
+
+/**
+ * This is the scanner for any MemStore implementation, derived from MemStore.
+ * The MemStoreScanner combines SegmentScanner from different Segments and
+ * uses the key-value heap and the reversed key-value heap for the aggregated key-values set.
+ * It is assumed that only traversing forward or backward is used (without zigzagging in between)
+ */
+@InterfaceAudience.Private
+public class MemStoreScanner extends NonLazyKeyValueScanner {
+  /**
+   * Types of cell MemStoreScanner
+   */
+  static public enum Type {
+    UNDEFINED,
+    COMPACT_FORWARD,
+    USER_SCAN_FORWARD,
+    USER_SCAN_BACKWARD
+  }
+
+  // heap of scanners used for traversing forward
+  private KeyValueHeap forwardHeap;
+  // reversed scanners heap for traversing backward
+  private ReversedKeyValueHeap backwardHeap;
+
+  // The type of the scan is defined by constructor
+  // or according to the first usage
+  private Type type = Type.UNDEFINED;
+
+  private long readPoint;
+  // remember the initial version of the scanners list
+  List<SegmentScanner> scanners;
+  // pointer back to the relevant MemStore
+  // is needed for shouldSeek() method
+  private AbstractMemStore backwardReferenceToMemStore;
+
+  /**
+   * Constructor.
+   * If UNDEFINED type for MemStoreScanner is provided, the forward heap is used as default!
+   * After constructor only one heap is going to be initialized for entire lifespan
+   * of the MemStoreScanner. A specific scanner can only be one directional!
+   *
+   * @param ms        Pointer back to the MemStore
+   * @param readPoint Read point below which we can safely remove duplicate KVs
+   * @param type      The scan type COMPACT_FORWARD should be used for compaction
+   */
+  public MemStoreScanner(AbstractMemStore ms, long readPoint, Type type) throws IOException {
+    this(ms, ms.getListOfScanners(readPoint), readPoint, type);
+  }
+
+  /* Constructor used only when the scan usage is unknown
+  and need to be defined according to the first move */
+  public MemStoreScanner(AbstractMemStore ms, long readPt) throws IOException {
+    this(ms, readPt, Type.UNDEFINED);
+  }
+
+  public MemStoreScanner(AbstractMemStore ms, List<SegmentScanner> scanners, long readPoint,
+      Type type) throws IOException {
+    super();
+    this.readPoint = readPoint;
+    this.type = type;
+    switch (type) {
+      case UNDEFINED:
+      case USER_SCAN_FORWARD:
+      case COMPACT_FORWARD:
+        this.forwardHeap = new KeyValueHeap(scanners, ms.getComparator());
+        break;
+      case USER_SCAN_BACKWARD:
+        this.backwardHeap = new ReversedKeyValueHeap(scanners, ms.getComparator());
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown scanner type in MemStoreScanner");
+    }
+    this.backwardReferenceToMemStore = ms;
+    this.scanners = scanners;
+    if (Trace.isTracing() && Trace.currentSpan() != null) {
+      Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner");
+    }
+  }
+
+  /**
+   * Returns the cell from the top-most scanner without advancing the iterator.
+   * The backward traversal is assumed, only if specified explicitly
+   */
+  @Override
+  public synchronized Cell peek() {
+    if (type == Type.USER_SCAN_BACKWARD) {
+      return backwardHeap.peek();
+    }
+    return forwardHeap.peek();
+  }
+
+  /**
+   * Gets the next cell from the top-most scanner. Assumed forward scanning.
+   */
+  @Override
+  public synchronized Cell next() throws IOException {
+    KeyValueHeap heap = (Type.USER_SCAN_BACKWARD == type) ? backwardHeap : forwardHeap;
+
+    // loop over till the next suitable value
+    // take next value from the heap
+    for (Cell currentCell = heap.next();
+         currentCell != null;
+         currentCell = heap.next()) {
+
+      // all the logic of presenting cells is inside the internal SegmentScanners
+      // located inside the heap
+
+      return currentCell;
+    }
+    return null;
+  }
+
+  /**
+   * Set the scanner at the seek key. Assumed forward scanning.
+   * Must be called only once: there is no thread safety between the scanner
+   * and the memStore.
+   *
+   * @param cell seek value
+   * @return false if the key is null or if there is no data
+   */
+  @Override
+  public synchronized boolean seek(Cell cell) throws IOException {
+    assertForward();
+
+    if (cell == null) {
+      close();
+      return false;
+    }
+
+    return forwardHeap.seek(cell);
+  }
+
+  /**
+   * Move forward on the sub-lists set previously by seek. Assumed forward scanning.
+   *
+   * @param cell seek value (should be non-null)
+   * @return true if there is at least one KV to read, false otherwise
+   */
+  @Override
+  public synchronized boolean reseek(Cell cell) throws IOException {
+    /*
+    * See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation.
+    * This code is executed concurrently with flush and puts, without locks.
+    * Two points must be known when working on this code:
+    * 1) It's not possible to use the 'kvTail' and 'snapshot'
+    *  variables, as they are modified during a flush.
+    * 2) The ideal implementation for performance would use the sub skip list
+    *  implicitly pointed by the iterators 'kvsetIt' and
+    *  'snapshotIt'. Unfortunately the Java API does not offer a method to
+    *  get it. So we remember the last keys we iterated to and restore
+    *  the reseeked set to at least that point.
+    *
+    *  TODO: The above comment copied from the original MemStoreScanner
+    */
+    assertForward();
+    return forwardHeap.reseek(cell);
+  }
+
+  /**
+   * MemStoreScanner returns max value as sequence id because it will
+   * always have the latest data among all files.
+   */
+  @Override
+  public synchronized long getSequenceID() {
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  public synchronized void close() {
+
+    if (forwardHeap != null) {
+      assert ((type == Type.USER_SCAN_FORWARD) ||
+          (type == Type.COMPACT_FORWARD) || (type == Type.UNDEFINED));
+      forwardHeap.close();
+      forwardHeap = null;
+      if (backwardHeap != null) {
+        backwardHeap.close();
+        backwardHeap = null;
+      }
+    } else if (backwardHeap != null) {
+      assert (type == Type.USER_SCAN_BACKWARD);
+      backwardHeap.close();
+      backwardHeap = null;
+    }
+  }
+
+  /**
+   * Set the scanner at the seek key. Assumed backward scanning.
+   *
+   * @param cell seek value
+   * @return false if the key is null or if there is no data
+   */
+  @Override
+  public synchronized boolean backwardSeek(Cell cell) throws IOException {
+    initBackwardHeapIfNeeded(cell, false);
+    return backwardHeap.backwardSeek(cell);
+  }
+
+  /**
+   * Assumed backward scanning.
+   *
+   * @param cell seek value
+   * @return false if the key is null or if there is no data
+   */
+  @Override
+  public synchronized boolean seekToPreviousRow(Cell cell) throws IOException {
+    initBackwardHeapIfNeeded(cell, false);
+    if (backwardHeap.peek() == null) {
+      restartBackwardHeap(cell);
+    }
+    return backwardHeap.seekToPreviousRow(cell);
+  }
+
+  @Override
+  public synchronized boolean seekToLastRow() throws IOException {
+    // TODO: it looks like this is how it should be, however ReversedKeyValueHeap class doesn't
+    // implement seekToLastRow() method :(
+    // however seekToLastRow() was implemented in internal MemStoreScanner
+    // so I wonder whether we need to come with our own workaround, or to update
+    // ReversedKeyValueHeap
+    return initBackwardHeapIfNeeded(KeyValue.LOWESTKEY, true);
+  }
+
+  /**
+   * Check if this memstore may contain the required keys
+   * @return False if the key definitely does not exist in this Memstore
+   */
+  @Override
+  public synchronized boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
+
+    if (type == Type.COMPACT_FORWARD) {
+      return true;
+    }
+
+    for (SegmentScanner sc : scanners) {
+      if (sc.shouldSeek(scan, oldestUnexpiredTS)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // debug method
+  @Override
+  public String toString() {
+    StringBuffer buf = new StringBuffer();
+    int i = 1;
+    for (SegmentScanner scanner : scanners) {
+      buf.append("scanner (" + i + ") " + scanner.toString() + " ||| ");
+      i++;
+    }
+    return buf.toString();
+  }
+  /****************** Private methods ******************/
+  /**
+   * Restructure the ended backward heap after rerunning a seekToPreviousRow()
+   * on each scanner
+   * @return false if given Cell does not exist in any scanner
+   */
+  private boolean restartBackwardHeap(Cell cell) throws IOException {
+    boolean res = false;
+    for (SegmentScanner scan : scanners) {
+      res |= scan.seekToPreviousRow(cell);
+    }
+    this.backwardHeap =
+        new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator());
+    return res;
+  }
+
+  /**
+   * Checks whether the type of the scan suits the assumption of moving backward
+   */
+  private boolean initBackwardHeapIfNeeded(Cell cell, boolean toLast) throws IOException {
+    boolean res = false;
+    if (toLast && (type != Type.UNDEFINED)) {
+      throw new IllegalStateException(
+          "Wrong usage of initBackwardHeapIfNeeded in parameters. The type is:" + type.toString());
+    }
+    if (type == Type.UNDEFINED) {
+      // In case we started from peek, release the forward heap
+      // and build backward. Set the correct type. Thus this turn
+      // can happen only once
+      if ((backwardHeap == null) && (forwardHeap != null)) {
+        forwardHeap.close();
+        forwardHeap = null;
+        // before building the heap seek for the relevant key on the scanners,
+        // for the heap to be built from the scanners correctly
+        for (SegmentScanner scan : scanners) {
+          if (toLast) {
+            res |= scan.seekToLastRow();
+          } else {
+            res |= scan.backwardSeek(cell);
+          }
+        }
+        this.backwardHeap =
+            new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator());
+        type = Type.USER_SCAN_BACKWARD;
+      }
+    }
+
+    if (type == Type.USER_SCAN_FORWARD) {
+      throw new IllegalStateException("Traversing backward with forward scan");
+    }
+    return res;
+  }
+
+  /**
+   * Checks whether the type of the scan suits the assumption of moving forward
+   */
+  private void assertForward() throws IllegalStateException {
+    if (type == Type.UNDEFINED) {
+      type = Type.USER_SCAN_FORWARD;
+    }
+
+    if (type == Type.USER_SCAN_BACKWARD) {
+      throw new IllegalStateException("Traversing forward with backward scan");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
index be853c5..28ab693 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
@@ -34,14 +34,13 @@ public class MemStoreSnapshot {
   private final KeyValueScanner scanner;
   private final boolean tagsPresent;
 
-  public MemStoreSnapshot(long id, int cellsCount, long size, TimeRangeTracker timeRangeTracker,
-      KeyValueScanner scanner, boolean tagsPresent) {
+  public MemStoreSnapshot(long id, ImmutableSegment snapshot) {
     this.id = id;
-    this.cellsCount = cellsCount;
-    this.size = size;
-    this.timeRangeTracker = timeRangeTracker;
-    this.scanner = scanner;
-    this.tagsPresent = tagsPresent;
+    this.cellsCount = snapshot.getCellsCount();
+    this.size = snapshot.getSize();
+    this.timeRangeTracker = snapshot.getTimeRangeTracker();
+    this.scanner = snapshot.getKeyValueScanner();
+    this.tagsPresent = snapshot.isTagsPresent();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegment.java
new file mode 100644
index 0000000..743416c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegment.java
@@ -0,0 +1,153 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.Iterator;
+import java.util.SortedSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * This mutable store segment encapsulates a mutable cell set and its respective memory allocation
+ * buffers (MSLAB).
+ */
+@InterfaceAudience.Private
+final class MutableCellSetSegment extends MutableSegment {
+
+  private volatile CellSet cellSet;
+  private final CellComparator comparator;
+
+  // Instantiate objects only using factory
+  MutableCellSetSegment(CellSet cellSet, MemStoreLAB memStoreLAB, long size,
+      CellComparator comparator) {
+    super(memStoreLAB, size);
+    this.cellSet = cellSet;
+    this.comparator = comparator;
+  }
+
+  @Override
+  public SegmentScanner getSegmentScanner(long readPoint) {
+    return new MutableCellSetSegmentScanner(this, readPoint);
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return getCellSet().isEmpty();
+  }
+
+  @Override
+  public int getCellsCount() {
+    return getCellSet().size();
+  }
+
+  @Override
+  public long add(Cell cell) {
+    boolean succ = getCellSet().add(cell);
+    long s = AbstractMemStore.heapSizeChange(cell, succ);
+    updateMetaInfo(cell, s);
+    // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call.
+    // When we use ACL CP or Visibility CP which deals with Tags during
+    // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not
+    // parse the byte[] to identify the tags length.
+    if(cell.getTagsLength() > 0) {
+      tagsPresent = true;
+    }
+    return s;
+  }
+
+  @Override
+  public long rollback(Cell cell) {
+    Cell found = get(cell);
+    if (found != null && found.getSequenceId() == cell.getSequenceId()) {
+      long sz = AbstractMemStore.heapSizeChange(cell, true);
+      remove(cell);
+      incSize(-sz);
+      return sz;
+    }
+    return 0;
+  }
+
+  @Override
+  public Cell getFirstAfter(Cell cell) {
+    SortedSet<Cell> snTailSet = tailSet(cell);
+    if (!snTailSet.isEmpty()) {
+      return snTailSet.first();
+    }
+    return null;
+  }
+
+  @Override
+  public void dump(Log log) {
+    for (Cell cell: getCellSet()) {
+      log.debug(cell);
+    }
+  }
+
+  @Override
+  public SortedSet<Cell> tailSet(Cell firstCell) {
+    return getCellSet().tailSet(firstCell);
+  }
+  @Override
+  public CellSet getCellSet() {
+    return cellSet;
+  }
+  @Override
+  public CellComparator getComparator() {
+    return comparator;
+  }
+
+  //*** Methods for MemStoreSegmentsScanner
+  public Cell last() {
+    return getCellSet().last();
+  }
+
+  public Iterator<Cell> iterator() {
+    return getCellSet().iterator();
+  }
+
+  public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
+    return getCellSet().headSet(firstKeyOnRow);
+  }
+
+  public int compare(Cell left, Cell right) {
+    return getComparator().compare(left, right);
+  }
+
+  public int compareRows(Cell left, Cell right) {
+    return getComparator().compareRows(left, right);
+  }
+
+  private Cell get(Cell cell) {
+    return getCellSet().get(cell);
+  }
+
+  private boolean remove(Cell e) {
+    return getCellSet().remove(e);
+  }
+
+  // methods for tests
+  @Override
+  Cell first() {
+    return this.getCellSet().first();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegmentScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegmentScanner.java
new file mode 100644
index 0000000..17791ff
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableCellSetSegmentScanner.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.SortedSet;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A scanner of a single cells segment {@link MutableCellSetSegment}.
+ */
+@InterfaceAudience.Private
class MutableCellSetSegmentScanner extends SegmentScanner {

  // the observed structure
  private final MutableCellSetSegment segment;
  // the highest relevant MVCC (cells with a higher sequence id are skipped)
  private long readPoint;
  // the current iterator that can be reinitialized by
  // seek(), backwardSeek(), or reseek()
  private Iterator<Cell> iter;
  // the pre-calculated cell to be returned by peek() or next()
  private Cell current = null;
  // A flag represents whether could stop skipping KeyValues for MVCC
  // if have encountered the next row. Only used for reversed scan
  private boolean stopSkippingKVsIfNextRow = false;
  // last iterated KVs by seek (to restore the iterator state after reseek)
  private Cell last = null;

  public MutableCellSetSegmentScanner(MutableCellSetSegment segment, long readPoint) {
    super();
    this.segment = segment;
    this.readPoint = readPoint;
    iter = segment.iterator();
    // the initialization of the current is required for working with heap of SegmentScanners
    current = getNext();
    //increase the reference count so the underlying structure will not be de-allocated
    this.segment.incScannerCount();
  }

  /**
   * Look at the next Cell in this scanner, but do not iterate the scanner
   * @return the currently observed Cell
   */
  @Override
  public Cell peek() {          // sanity check, the current should be always valid
    if (current!=null && current.getSequenceId() > readPoint) {
      throw new RuntimeException("current is invalid: read point is "+readPoint+", " +
          "while current sequence id is " +current.getSequenceId());
    }

    return current;
  }

  /**
   * Return the next Cell in this scanner, iterating the scanner
   * @return the next Cell or null if end of scanner
   */
  @Override
  public Cell next() throws IOException {
    Cell oldCurrent = current;
    current = getNext();                  // update the currently observed Cell
    return oldCurrent;
  }

  /**
   * Seek the scanner at or after the specified Cell.
   * @param cell seek value
   * @return true if scanner has values left, false if end of scanner
   */
  @Override
  public boolean seek(Cell cell) throws IOException {
    if(cell == null) {
      close();
      return false;
    }
    // restart the iterator from new key
    iter = segment.tailSet(cell).iterator();
    // last is going to be reinitialized in the next getNext() call
    last = null;
    current = getNext();
    return (current != null);
  }

  /**
   * Reseek the scanner at or after the specified KeyValue.
   * This method is guaranteed to seek at or after the required key only if the
   * key comes after the current position of the scanner. Should not be used
   * to seek to a key which may come before the current position.
   *
   * @param cell seek value (should be non-null)
   * @return true if scanner has values left, false if end of scanner
   */
  @Override
  public boolean reseek(Cell cell) throws IOException {

    /*
    See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation.
    This code is executed concurrently with flush and puts, without locks.
    The ideal implementation for performance would use the sub skip list implicitly
    pointed by the iterator. Unfortunately the Java API does not offer a method to
    get it. So we remember the last keys we iterated to and restore
    the reseeked set to at least that point.
    */
    iter = segment.tailSet(getHighest(cell, last)).iterator();
    current = getNext();
    return (current != null);
  }

  /**
   * Seek the scanner at or before the row of specified Cell, it firstly
   * tries to seek the scanner at or after the specified Cell, return if
   * peek KeyValue of scanner has the same row with specified Cell,
   * otherwise seek the scanner at the first Cell of the row which is the
   * previous row of specified KeyValue
   *
   * @param key seek Cell
   * @return true if the scanner is at the valid KeyValue, false if such Cell does not exist
   */
  @Override
  public boolean backwardSeek(Cell key) throws IOException {
    seek(key);    // seek forward then go backward
    if (peek() == null || segment.compareRows(peek(), key) > 0) {
      return seekToPreviousRow(key);
    }
    return true;
  }

  /**
   * Seek the scanner at the first Cell of the row which is the previous row
   * of specified key
   *
   * @param cell seek value
   * @return true if the scanner at the first valid Cell of previous row,
   *     false if not existing such Cell
   */
  @Override
  public boolean seekToPreviousRow(Cell cell) throws IOException {
    boolean keepSeeking = false;
    Cell key = cell;

    // may iterate several times if the first cell of a candidate row is
    // invisible at this read point (all its cells got skipped by getNext())
    do {
      Cell firstKeyOnRow = CellUtil.createFirstOnRow(key);
      SortedSet<Cell> cellHead = segment.headSet(firstKeyOnRow);
      Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last();
      if (lastCellBeforeRow == null) {
        current = null;
        return false;
      }
      Cell firstKeyOnPreviousRow = CellUtil.createFirstOnRow(lastCellBeforeRow);
      // confine the forward skip of getNext() to this single row; the flag is
      // observed by getNext() which is invoked via seek()
      this.stopSkippingKVsIfNextRow = true;
      seek(firstKeyOnPreviousRow);
      this.stopSkippingKVsIfNextRow = false;
      if (peek() == null
          || segment.getComparator().compareRows(peek(), firstKeyOnPreviousRow) > 0) {
        keepSeeking = true;
        key = firstKeyOnPreviousRow;
        continue;
      } else {
        keepSeeking = false;
      }
    } while (keepSeeking);
    return true;
  }

  /**
   * Seek the scanner at the first KeyValue of last row
   *
   * @return true if scanner has values left, false if the underlying data is empty
   */
  @Override
  public boolean seekToLastRow() throws IOException {
    Cell higherCell = segment.isEmpty() ? null : segment.last();
    if (higherCell == null) {
      return false;
    }

    Cell firstCellOnLastRow = CellUtil.createFirstOnRow(higherCell);

    // if every cell of the last row is invisible at this read point,
    // fall back to the previous row
    if (seek(firstCellOnLastRow)) {
      return true;
    } else {
      return seekToPreviousRow(higherCell);
    }
  }

  @Override protected Segment getSegment() {
    return segment;
  }

  /********************* Private Methods **********************/

  /**
   * Private internal method for iterating over the segment,
   * skipping the cells with irrelevant MVCC (sequence id above the read point).
   * Note: 'last' is updated in the finally-block even when null is returned,
   * so reseek() can restore the iterator position later.
   */
  private Cell getNext() {
    Cell startKV = current;
    Cell next = null;

    try {
      while (iter.hasNext()) {
        next = iter.next();
        if (next.getSequenceId() <= this.readPoint) {
          return next;                    // skip irrelevant versions
        }
        if (stopSkippingKVsIfNextRow &&   // for backwardSeek() stay in the
            startKV != null &&        // boundaries of a single row
            segment.compareRows(next, startKV) > 0) {
          return null;
        }
      } // end of while

      return null; // nothing found
    } finally {
      if (next != null) {
        // in all cases, remember the last KV we iterated to, needed for reseek()
        last = next;
      }
    }
  }

  /**
   * Private internal method that returns the higher of the two key values, or null
   * if they are both null
   */
  private Cell getHighest(Cell first, Cell second) {
    if (first == null && second == null) {
      return null;
    }
    if (first != null && second != null) {
      int compare = segment.compare(first, second);
      return (compare > 0 ? first : second);
    }
    return (first != null ? first : second);
  }

}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
new file mode 100644
index 0000000..fcaddb0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java
@@ -0,0 +1,57 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.SortedSet;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * An abstraction of a mutable segment in memstore, specifically the active segment.
+ */
+@InterfaceAudience.Private
+public abstract class MutableSegment extends Segment {
+
+  protected MutableSegment(MemStoreLAB memStoreLAB, long size) {
+    super(memStoreLAB, size);
+  }
+
+  /**
+   * Returns a subset of the segment cell set, which starts with the given cell
+   * @param firstCell a cell in the segment
+   * @return a subset of the segment cell set, which starts with the given cell
+   */
+  public abstract SortedSet<Cell> tailSet(Cell firstCell);
+
+  /**
+   * Returns the Cell comparator used by this segment
+   * @return the Cell comparator used by this segment
+   */
+  public abstract CellComparator getComparator();
+
+  //methods for test
+
+  /**
+   * Returns the first cell in the segment
+   * @return the first cell in the segment
+   */
+  abstract Cell first();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
new file mode 100644
index 0000000..7891809
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -0,0 +1,218 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.ByteRange;
+
+/**
+ * This is an abstraction of a segment maintained in a memstore, e.g., the active
+ * cell set or its snapshot.
+ *
+ * This abstraction facilitates the management of the compaction pipeline and the shifts of these
+ * segments from active set to snapshot set in the default implementation.
+ */
+@InterfaceAudience.Private
+public abstract class Segment {
+
+  private volatile MemStoreLAB memStoreLAB;
+  private final AtomicLong size;
+  private final TimeRangeTracker timeRangeTracker;
+  protected volatile boolean tagsPresent;
+
+  protected Segment(MemStoreLAB memStoreLAB, long size) {
+    this.memStoreLAB = memStoreLAB;
+    this.size = new AtomicLong(size);
+    this.timeRangeTracker = new TimeRangeTracker();
+    this.tagsPresent = false;
+  }
+
+  protected Segment(Segment segment) {
+    this.memStoreLAB = segment.getMemStoreLAB();
+    this.size = new AtomicLong(segment.getSize());
+    this.timeRangeTracker = segment.getTimeRangeTracker();
+    this.tagsPresent = segment.isTagsPresent();
+  }
+
+  /**
+   * Creates a scanner that is able to scan this concrete segment
+   * @return a scanner for the given read point
+   */
+  public abstract SegmentScanner getSegmentScanner(long readPoint);
+
+  /**
+   * Returns whether the segment has any cells
+   * @return whether the segment has any cells
+   */
+  public abstract boolean isEmpty();
+
+  /**
+   * Returns number of cells in segment
+   * @return number of cells in segment
+   */
+  public abstract int getCellsCount();
+
+  /**
+   * Adds the given cell into the segment
+   * @return the change in the heap size
+   */
+  public abstract long add(Cell cell);
+
+  /**
+   * Removes the given cell from the segment
+   * @return the change in the heap size
+   */
+  public abstract long rollback(Cell cell);
+
+  /**
+   * Returns the first cell in the segment that has equal or greater key than the given cell
+   * @return the first cell in the segment that has equal or greater key than the given cell
+   */
+  public abstract Cell getFirstAfter(Cell cell);
+
+  /**
+   * Returns a set of all cells in the segment
+   * @return a set of all cells in the segment
+   */
+  public abstract CellSet getCellSet();
+
+  /**
+   * Closes the segment, releasing its MSLAB; called before the segment is discarded
+   */
+  public void close() {
+    MemStoreLAB mslab = getMemStoreLAB();
+    if(mslab != null) {
+      mslab.close();
+    }
+    // do not set MSLab to null as scanners may still be reading the data here and need to decrease
+    // the counter when they finish
+  }
+
+  /**
+   * If the segment has a memory allocator, the cell is cloned into the allocator's space and
+   * the clone is returned; otherwise the given cell itself is returned
+   * @return either the given cell or its clone
+   */
+  public Cell maybeCloneWithAllocator(Cell cell) {
+    if (getMemStoreLAB() == null) {
+      return cell;
+    }
+
+    int len = KeyValueUtil.length(cell);
+    ByteRange alloc = getMemStoreLAB().allocateBytes(len);
+    if (alloc == null) {
+      // The allocation was too large, allocator decided
+      // not to do anything with it.
+      return cell;
+    }
+    assert alloc.getBytes() != null;
+    KeyValueUtil.appendToByteArray(cell, alloc.getBytes(), alloc.getOffset());
+    KeyValue newKv = new KeyValue(alloc.getBytes(), alloc.getOffset(), len);
+    newKv.setSequenceId(cell.getSequenceId());
+    return newKv;
+  }
+
+  public boolean shouldSeek(Scan scan, long oldestUnexpiredTS) {
+    return (getTimeRangeTracker().includesTimeRange(scan.getTimeRange())
+        && (getTimeRangeTracker().getMaximumTimestamp() >=
+        oldestUnexpiredTS));
+  }
+
+  public long getMinTimestamp() {
+    return getTimeRangeTracker().getMinimumTimestamp();
+  }
+
+  public boolean isTagsPresent() {
+    return tagsPresent;
+  }
+
+  public void incScannerCount() {
+    if(getMemStoreLAB() != null) {
+      getMemStoreLAB().incScannerCount();
+    }
+  }
+
+  public void decScannerCount() {
+    if(getMemStoreLAB() != null) {
+      getMemStoreLAB().decScannerCount();
+    }
+  }
+
+  /**
+   * Sets the heap size of the segment; used to account for different class overheads
+   * @return this object
+   */
+
+  public Segment setSize(long size) {
+    this.size.set(size);
+    return this;
+  }
+
+  /**
+   * Returns the heap size of the segment
+   * @return the heap size of the segment
+   */
+  public long getSize() {
+    return size.get();
+  }
+
+  /**
+   * Increases the heap size counter of the segment by the given delta
+   */
+  public void incSize(long delta) {
+    size.addAndGet(delta);
+  }
+
+  public TimeRangeTracker getTimeRangeTracker() {
+    return timeRangeTracker;
+  }
+
+  protected void updateMetaInfo(Cell toAdd, long s) {
+    getTimeRangeTracker().includeTimestamp(toAdd);
+    size.addAndGet(s);
+  }
+
+  private MemStoreLAB getMemStoreLAB() {
+    return memStoreLAB;
+  }
+
+  // Debug methods
+  /**
+   * Dumps all cells of the segment into the given log
+   */
+  public abstract void dump(Log log);
+
+  @Override
+  public String toString() {
+    String res = "Store segment of type "+this.getClass().getName()+"; ";
+    res += "isEmpty "+(isEmpty()?"yes":"no")+"; ";
+    res += "cellCount "+getCellsCount()+"; ";
+    res += "size "+getSize()+"; ";
+    res += "Min ts "+getMinTimestamp()+"; ";
+    return res;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
new file mode 100644
index 0000000..ccb11df
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -0,0 +1,89 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+
+/**
+ * A singleton store segment factory.
+ * Generates concrete store segments.
+ */
+@InterfaceAudience.Private
+public final class SegmentFactory {
+
+  static final String USEMSLAB_KEY = "hbase.hregion.memstore.mslab.enabled";
+  static final boolean USEMSLAB_DEFAULT = true;
+  static final String MSLAB_CLASS_NAME = "hbase.regionserver.mslab.class";
+
+  private SegmentFactory() {}
+  private static SegmentFactory instance = new SegmentFactory();
+  public static SegmentFactory instance() {
+    return instance;
+  }
+
+  public ImmutableSegment createImmutableSegment(final Configuration conf,
+      final CellComparator comparator, long size) {
+    MemStoreLAB memStoreLAB = getMemStoreLAB(conf);
+    MutableSegment segment = generateMutableSegment(conf, comparator, memStoreLAB, size);
+    return createImmutableSegment(conf, segment);
+  }
+
+  public ImmutableSegment createImmutableSegment(CellComparator comparator,
+      long size) {
+    MutableSegment segment = generateMutableSegment(null, comparator, null, size);
+    return createImmutableSegment(null, segment);
+  }
+
+  public ImmutableSegment createImmutableSegment(final Configuration conf, MutableSegment segment) {
+    return generateImmutableSegment(conf, segment);
+  }
+  public MutableSegment createMutableSegment(final Configuration conf,
+      CellComparator comparator, long size) {
+    MemStoreLAB memStoreLAB = getMemStoreLAB(conf);
+    return generateMutableSegment(conf, comparator, memStoreLAB, size);
+  }
+
+  //****** private methods to instantiate concrete store segments **********//
+
+  private ImmutableSegment generateImmutableSegment(final Configuration conf,
+      MutableSegment segment) {
+    // TBD use configuration to set type of segment
+    return new ImmutableSegmentAdapter(segment);
+  }
+  private MutableSegment generateMutableSegment(
+      final Configuration conf, CellComparator comparator, MemStoreLAB memStoreLAB, long size) {
+    // TBD use configuration to set type of segment
+    CellSet set = new CellSet(comparator);
+    return new MutableCellSetSegment(set, memStoreLAB, size, comparator);
+  }
+
+  private MemStoreLAB getMemStoreLAB(Configuration conf) {
+    MemStoreLAB memStoreLAB = null;
+    if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
+      String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
+      memStoreLAB = ReflectionUtils.instantiateWithCustomCtor(className,
+          new Class[] { Configuration.class }, new Object[] { conf });
+    }
+    return memStoreLAB;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
new file mode 100644
index 0000000..8852d5c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
@@ -0,0 +1,152 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+
+/**
+ * An abstraction for store segment scanner.
+ */
+@InterfaceAudience.Private
+public abstract class SegmentScanner implements KeyValueScanner {
+
+  private long sequenceID = Long.MAX_VALUE;
+
+  protected abstract Segment getSegment();
+
+  /**
+   * Get the sequence id associated with this KeyValueScanner. This is required
+   * for comparing multiple files (or memstore segments) scanners to find out
+   * which one has the latest data.
+   *
+   */
+  @Override
+  public long getSequenceID() {
+    return sequenceID;
+  }
+
+  /**
+   * Close the KeyValue scanner.
+   */
+  @Override
+  public void close() {
+    getSegment().decScannerCount();
+  }
+
+  /**
+   * Scan filtering is resolved at the higher level, which is
+   * MemStoreScanner, so this conservatively returns true by default. Doesn't throw
+   * IllegalStateException in order not to change the signature of the
+   * overridden method.
+   */
+  @Override
+  public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
+    return true;
+  }
+  /**
+   * This scanner is working solely on the in-memory MemStore therefore this
+   * interface is not relevant.
+   */
+  @Override
+  public boolean requestSeek(Cell c, boolean forward, boolean useBloom)
+      throws IOException {
+
+    throw new IllegalStateException(
+        "requestSeek cannot be called on MutableCellSetSegmentScanner");
+  }
+
+  /**
+   * This scanner is working solely on the in-memory MemStore and doesn't work on
+   * store files, MutableCellSetSegmentScanner always does the seek,
+   * therefore always returning true.
+   */
+  @Override
+  public boolean realSeekDone() {
+    return true;
+  }
+
+  /**
+   * This function should be never called on scanners that always do real seek operations (i.e. most
+   * of the scanners and also this one). The easiest way to achieve this is to call
+   * {@link #realSeekDone()} first.
+   */
+  @Override
+  public void enforceSeek() throws IOException {
+    throw new IllegalStateException(
+        "enforceSeek cannot be called on MutableCellSetSegmentScanner");
+  }
+
+  /**
+   * @return true if this is a file scanner. Otherwise a memory scanner is assumed.
+   */
+  @Override
+  public boolean isFileScanner() {
+    return false;
+  }
+
+  /**
+   * @return the next key in the index (the key to seek to the next block)
+   *     if known, or null otherwise.
+   *     Not relevant for an in-memory scanner, hence always null
+   */
+  @Override
+  public Cell getNextIndexedKey() {
+    return null;
+  }
+
+  /**
+   * Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between
+   * cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.
+   */
+  @Override
+  public void shipped() throws IOException {
+    // do nothing
+  }
+
+  /**
+   * Set the sequence id of the scanner.
+   * This is used to determine an order between memory segment scanners.
+   * @param x a unique sequence id
+   */
+  public void setSequenceID(long x) {
+    sequenceID = x;
+  }
+
+  /**
+   * Returns whether the given scan should seek in this segment
+   * @return whether the given scan should seek in this segment
+   */
+  public boolean shouldSeek(Scan scan, long oldestUnexpiredTS) {
+    return getSegment().shouldSeek(scan,oldestUnexpiredTS);
+  }
+
+  // Debug method
+  @Override
+  public String toString() {
+    String res = "Store segment scanner of type "+this.getClass().getName()+"; ";
+    res += "sequence id "+getSequenceID()+"; ";
+    res += getSegment().toString();
+    return res;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
index 34ba1fa..f4f25dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 4f30960..5c79d72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -19,22 +19,6 @@
 
 package org.apache.hadoop.hbase.io;
 
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
@@ -42,9 +26,9 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-import org.apache.hadoop.hbase.io.hfile.LruCachedBlock;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
-import org.apache.hadoop.hbase.regionserver.CellSkipListSet;
+import org.apache.hadoop.hbase.io.hfile.LruCachedBlock;
+import org.apache.hadoop.hbase.regionserver.CellSet;
 import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -56,6 +40,22 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -237,8 +237,8 @@ public class TestHeapSize  {
       assertEquals(expected, actual);
     }
 
-    // CellSkipListSet
-    cl = CellSkipListSet.class;
+    // CellSet
+    cl = CellSet.class;
     expected = ClassSize.estimateBase(cl, false);
     actual = ClassSize.CELL_SKIPLIST_SET;
     if (expected != actual) {
@@ -305,15 +305,16 @@ public class TestHeapSize  {
     // DefaultMemStore Deep Overhead
     actual = DefaultMemStore.DEEP_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
-    expected += ClassSize.estimateBase(AtomicLong.class, false);
-    expected += (2 * ClassSize.estimateBase(CellSkipListSet.class, false));
+    expected += (2 * ClassSize.estimateBase(AtomicLong.class, false));
+    expected += (2 * ClassSize.estimateBase(CellSet.class, false));
     expected += (2 * ClassSize.estimateBase(ConcurrentSkipListMap.class, false));
     expected += (2 * ClassSize.estimateBase(TimeRangeTracker.class, false));
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
       ClassSize.estimateBase(AtomicLong.class, true);
-      ClassSize.estimateBase(CellSkipListSet.class, true);
-      ClassSize.estimateBase(CellSkipListSet.class, true);
+      ClassSize.estimateBase(AtomicLong.class, true);
+      ClassSize.estimateBase(CellSet.class, true);
+      ClassSize.estimateBase(CellSet.class, true);
       ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
       ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
       ClassSize.estimateBase(TimeRangeTracker.class, true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
index 684839d..e0cc39f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
@@ -18,11 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.Iterator;
-import java.util.SortedSet;
-
 import junit.framework.TestCase;
-
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
@@ -32,10 +28,13 @@ import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.experimental.categories.Category;
 
+import java.util.Iterator;
+import java.util.SortedSet;
+
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestCellSkipListSet extends TestCase {
-  private final CellSkipListSet csls =
-    new CellSkipListSet(CellComparator.COMPARATOR);
+  private final CellSet csls =
+    new CellSet(CellComparator.COMPARATOR);
 
   protected void setUp() throws Exception {
     super.setUp();
@@ -163,4 +162,4 @@ public class TestCellSkipListSet extends TestCase {
     assertTrue(Bytes.equals(head.first().getValueArray(), head.first().getValueOffset(),
       head.first().getValueLength(), value2, 0, value2.length));
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index ec70740..5e6007d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -18,17 +18,10 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -57,12 +50,14 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 /** memstore test case */
 @Category({RegionServerTests.class, MediumTests.class})
@@ -89,11 +84,9 @@ public class TestDefaultMemStore extends TestCase {
     byte [] other = Bytes.toBytes("somethingelse");
     KeyValue samekey = new KeyValue(bytes, bytes, bytes, other);
     this.memstore.add(samekey);
-    Cell found = this.memstore.cellSet.first();
-    assertEquals(1, this.memstore.cellSet.size());
-    assertTrue(
-      Bytes.toString(found.getValueArray(), found.getValueOffset(), found.getValueLength()),
-      CellUtil.matchingValue(samekey, found));
+    Cell found = this.memstore.getActive().first();
+    assertEquals(1, this.memstore.getActive().getCellsCount());
+    assertTrue(Bytes.toString(found.getValueArray()), CellUtil.matchingValue(samekey, found));
   }
 
   /**
@@ -108,7 +101,7 @@ public class TestDefaultMemStore extends TestCase {
     Configuration conf = HBaseConfiguration.create();
     ScanInfo scanInfo =
         new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, KeepDeletedCells.FALSE, 0,
-            this.memstore.comparator);
+            this.memstore.getComparator());
     ScanType scanType = ScanType.USER_SCAN;
     StoreScanner s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
     int count = 0;
@@ -476,7 +469,7 @@ public class TestDefaultMemStore extends TestCase {
     for (int i = 0; i < snapshotCount; i++) {
       addRows(this.memstore);
       runSnapshot(this.memstore);
-      assertEquals("History not being cleared", 0, this.memstore.snapshot.size());
+      assertEquals("History not being cleared", 0, this.memstore.getSnapshot().getCellsCount());
     }
   }
 
@@ -497,7 +490,7 @@ public class TestDefaultMemStore extends TestCase {
     m.add(key2);
 
     assertTrue("Expected memstore to hold 3 values, actually has " +
-        m.cellSet.size(), m.cellSet.size() == 3);
+        m.getActive().getCellsCount(), m.getActive().getCellsCount() == 3);
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -529,7 +522,7 @@ public class TestDefaultMemStore extends TestCase {
     Configuration conf = HBaseConfiguration.create();
     for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
       ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE,
-        KeepDeletedCells.FALSE, 0, this.memstore.comparator);
+        KeepDeletedCells.FALSE, 0, this.memstore.getComparator());
       ScanType scanType = ScanType.USER_SCAN;
       InternalScanner scanner = new StoreScanner(new Scan(
           Bytes.toBytes(startRowId)), scanInfo, scanType, null,
@@ -570,12 +563,12 @@ public class TestDefaultMemStore extends TestCase {
     memstore.add(new KeyValue(row, fam ,qf3, val));
     //Creating a snapshot
     memstore.snapshot();
-    assertEquals(3, memstore.snapshot.size());
+    assertEquals(3, memstore.getSnapshot().getCellsCount());
     //Adding value to "new" memstore
-    assertEquals(0, memstore.cellSet.size());
+    assertEquals(0, memstore.getActive().getCellsCount());
     memstore.add(new KeyValue(row, fam ,qf4, val));
     memstore.add(new KeyValue(row, fam ,qf5, val));
-    assertEquals(2, memstore.cellSet.size());
+    assertEquals(2, memstore.getActive().getCellsCount());
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -597,7 +590,7 @@ public class TestDefaultMemStore extends TestCase {
     memstore.add(put2);
     memstore.add(put3);
 
-    assertEquals(3, memstore.cellSet.size());
+    assertEquals(3, memstore.getActive().getCellsCount());
 
     KeyValue del2 = new KeyValue(row, fam, qf1, ts2, KeyValue.Type.Delete, val);
     memstore.delete(del2);
@@ -608,9 +601,9 @@ public class TestDefaultMemStore extends TestCase {
     expected.add(put2);
     expected.add(put1);
 
-    assertEquals(4, memstore.cellSet.size());
+    assertEquals(4, memstore.getActive().getCellsCount());
     int i = 0;
-    for(Cell cell : memstore.cellSet) {
+    for(Cell cell : memstore.getActive().getCellSet()) {
       assertEquals(expected.get(i++), cell);
     }
   }
@@ -631,7 +624,7 @@ public class TestDefaultMemStore extends TestCase {
     memstore.add(put2);
     memstore.add(put3);
 
-    assertEquals(3, memstore.cellSet.size());
+    assertEquals(3, memstore.getActive().getCellsCount());
 
     KeyValue del2 =
       new KeyValue(row, fam, qf1, ts2, KeyValue.Type.DeleteColumn, val);
@@ -644,9 +637,9 @@ public class TestDefaultMemStore extends TestCase {
     expected.add(put1);
 
 
-    assertEquals(4, memstore.cellSet.size());
+    assertEquals(4, memstore.getActive().getCellsCount());
     int i = 0;
-    for (Cell cell: memstore.cellSet) {
+    for (Cell cell: memstore.getActive().getCellSet()) {
       assertEquals(expected.get(i++), cell);
     }
   }
@@ -684,9 +677,9 @@ public class TestDefaultMemStore extends TestCase {
 
 
 
-    assertEquals(5, memstore.cellSet.size());
+    assertEquals(5, memstore.getActive().getCellsCount());
     int i = 0;
-    for (Cell cell: memstore.cellSet) {
+    for (Cell cell: memstore.getActive().getCellSet()) {
       assertEquals(expected.get(i++), cell);
     }
   }
@@ -700,8 +693,8 @@ public class TestDefaultMemStore extends TestCase {
     memstore.add(new KeyValue(row, fam, qf, ts, val));
     KeyValue delete = new KeyValue(row, fam, qf, ts, KeyValue.Type.Delete, val);
     memstore.delete(delete);
-    assertEquals(2, memstore.cellSet.size());
-    assertEquals(delete, memstore.cellSet.first());
+    assertEquals(2, memstore.getActive().getCellsCount());
+    assertEquals(delete, memstore.getActive().first());
   }
 
   public void testRetainsDeleteVersion() throws IOException {
@@ -713,8 +706,8 @@ public class TestDefaultMemStore extends TestCase {
         "row1", "fam", "a", 100, KeyValue.Type.Delete, "dont-care");
     memstore.delete(delete);
 
-    assertEquals(2, memstore.cellSet.size());
-    assertEquals(delete, memstore.cellSet.first());
+    assertEquals(2, memstore.getActive().getCellsCount());
+    assertEquals(delete, memstore.getActive().first());
   }
   public void testRetainsDeleteColumn() throws IOException {
     // add a put to memstore
@@ -725,8 +718,8 @@ public class TestDefaultMemStore extends TestCase {
         KeyValue.Type.DeleteColumn, "dont-care");
     memstore.delete(delete);
 
-    assertEquals(2, memstore.cellSet.size());
-    assertEquals(delete, memstore.cellSet.first());
+    assertEquals(2, memstore.getActive().getCellsCount());
+    assertEquals(delete, memstore.getActive().first());
   }
   public void testRetainsDeleteFamily() throws IOException {
     // add a put to memstore
@@ -737,43 +730,8 @@ public class TestDefaultMemStore extends TestCase {
         KeyValue.Type.DeleteFamily, "dont-care");
     memstore.delete(delete);
 
-    assertEquals(2, memstore.cellSet.size());
-    assertEquals(delete, memstore.cellSet.first());
-  }
-
-  ////////////////////////////////////
-  //Test for timestamps
-  ////////////////////////////////////
-
-  /**
-   * Test to ensure correctness when using Memstore with multiple timestamps
-   */
-  public void testMultipleTimestamps() throws Exception {
-    long[] timestamps = new long[] {20,10,5,1};
-    Scan scan = new Scan();
-
-    for (long timestamp: timestamps)
-      addRows(memstore,timestamp);
-
-    byte[] fam = Bytes.toBytes("fam");
-    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-    when(hcd.getName()).thenReturn(fam);
-    Store store = mock(Store.class);
-    when(store.getFamily()).thenReturn(hcd);
-    scan.setColumnFamilyTimeRange(fam, 0, 2);
-    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
-
-    scan.setColumnFamilyTimeRange(fam, 20, 82);
-    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
-
-    scan.setColumnFamilyTimeRange(fam, 10, 20);
-    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
-
-    scan.setColumnFamilyTimeRange(fam, 8, 12);
-    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
-
-    scan.setColumnFamilyTimeRange(fam, 28, 42);
-    assertTrue(!memstore.shouldSeek(scan, store, Long.MIN_VALUE));
+    assertEquals(2, memstore.getActive().getCellsCount());
+    assertEquals(delete, memstore.getActive().first());
   }
 
   ////////////////////////////////////
@@ -795,7 +753,7 @@ public class TestDefaultMemStore extends TestCase {
    */
   public void testUpsertMSLAB() throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    conf.setBoolean(DefaultMemStore.USEMSLAB_KEY, true);
+    conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true);
     memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR);
 
     int ROW_SIZE = 2048;
@@ -838,7 +796,7 @@ public class TestDefaultMemStore extends TestCase {
   public void testUpsertMemstoreSize() throws Exception {
     Configuration conf = HBaseConfiguration.create();
     memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR);
-    long oldSize = memstore.size.get();
+    long oldSize = memstore.size();
 
     List<Cell> l = new ArrayList<Cell>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
@@ -849,18 +807,18 @@ public class TestDefaultMemStore extends TestCase {
     l.add(kv1); l.add(kv2); l.add(kv3);
 
     this.memstore.upsert(l, 2);// readpoint is 2
-    long newSize = this.memstore.size.get();
+    long newSize = this.memstore.size();
     assert(newSize > oldSize);
     //The kv1 should be removed.
-    assert(memstore.cellSet.size() == 2);
+    assert(memstore.getActive().getCellsCount() == 2);
 
     KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v");
     kv4.setSequenceId(1);
     l.clear(); l.add(kv4);
     this.memstore.upsert(l, 3);
-    assertEquals(newSize, this.memstore.size.get());
+    assertEquals(newSize, this.memstore.size());
     //The kv2 should be removed.
-    assert(memstore.cellSet.size() == 2);
+    assert(memstore.getActive().getCellsCount() == 2);
     //this.memstore = null;
   }
 
@@ -1021,10 +979,11 @@ public class TestDefaultMemStore extends TestCase {
 
   private long runSnapshot(final DefaultMemStore hmc) throws UnexpectedStateException {
     // Save off old state.
-    int oldHistorySize = hmc.snapshot.size();
+    int oldHistorySize = hmc.getSnapshot().getCellsCount();
     MemStoreSnapshot snapshot = hmc.snapshot();
     // Make some assertions about what just happened.
-    assertTrue("History size has not increased", oldHistorySize < hmc.snapshot.size());
+    assertTrue("History size has not increased", oldHistorySize < hmc.getSnapshot().getCellsCount
+        ());
     long t = memstore.timeOfOldestEdit();
     assertTrue("Time of oldest edit is not Long.MAX_VALUE", t == Long.MAX_VALUE);
     hmc.clearSnapshot(snapshot.getId());

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 385048c..b237490 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -18,20 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.security.Key;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.concurrent.ConcurrentSkipListSet;
-
-import javax.crypto.spec.SecretKeySpec;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -74,6 +60,19 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
+import javax.crypto.spec.SecretKeySpec;
+import java.io.IOException;
+import java.security.Key;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.concurrent.ConcurrentSkipListSet;
+
 @Category(MediumTests.class)
 public class TestHMobStore {
   public static final Log LOG = LogFactory.getLog(TestHMobStore.class);
@@ -468,7 +467,7 @@ public class TestHMobStore {
     this.store.snapshot();
     flushStore(store, id++);
     Assert.assertEquals(storeFilesSize, this.store.getStorefiles().size());
-    Assert.assertEquals(0, ((DefaultMemStore)this.store.memstore).cellSet.size());
+    Assert.assertEquals(0, ((AbstractMemStore)this.store.memstore).getActive().getCellsCount());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 7add8a9..a5574d3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -18,54 +18,10 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-
-import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.LAST_CHAR;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.protobuf.ByteString;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -176,10 +132,52 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.LAST_CHAR;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Basic stand-alone testing of HRegion.  No clusters!
@@ -302,8 +300,6 @@ public class TestHRegion {
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
-
-
   /*
    * This test is for verifying memstore snapshot size is correctly updated in case of rollback
    * See HBASE-10845
@@ -332,7 +328,7 @@ public class TestHRegion {
     Path rootDir = new Path(dir + "testMemstoreSnapshotSize");
     MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
     HRegion region = initHRegion(tableName, null, null, name.getMethodName(),
-        CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES);
+      CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES);
 
     Store store = region.getStore(COLUMN_FAMILY_BYTES);
     // Get some random bytes.
@@ -1289,7 +1285,8 @@ public class TestHRegion {
     private final AtomicInteger count;
     private Exception e;
 
-    GetTillDoneOrException(final int i, final byte[] r, final AtomicBoolean d, final AtomicInteger c) {
+    GetTillDoneOrException(final int i, final byte[] r, final AtomicBoolean d,
+        final AtomicInteger c) {
       super("getter." + i);
       this.g = new Get(r);
       this.done = d;
@@ -2452,10 +2449,10 @@ public class TestHRegion {
       // This is kinda hacky, but better than nothing...
       long now = System.currentTimeMillis();
       DefaultMemStore memstore = (DefaultMemStore) ((HStore) region.getStore(fam1)).memstore;
-      Cell firstCell = memstore.cellSet.first();
+      Cell firstCell = memstore.getActive().first();
       assertTrue(firstCell.getTimestamp() <= now);
       now = firstCell.getTimestamp();
-      for (Cell cell : memstore.cellSet) {
+      for (Cell cell : memstore.getActive().getCellSet()) {
         assertTrue(cell.getTimestamp() <= now);
         now = cell.getTimestamp();
       }
@@ -2782,7 +2779,8 @@ public class TestHRegion {
       } catch (NotServingRegionException e) {
         // this is the correct exception that is expected
       } catch (IOException e) {
-        fail("Got wrong type of exception - should be a NotServingRegionException, but was an IOException: "
+        fail("Got wrong type of exception - should be a NotServingRegionException, " +
+            "but was an IOException: "
             + e.getMessage());
       }
     } finally {
@@ -2980,7 +2978,8 @@ public class TestHRegion {
   }
 
   @Test
-  public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws IOException {
+  public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws
+      IOException {
     byte[] row1 = Bytes.toBytes("row1");
     byte[] fam1 = Bytes.toBytes("fam1");
     byte[][] families = { fam1 };
@@ -4978,7 +4977,8 @@ public class TestHRegion {
       // move the file of the primary region to the archive, simulating a compaction
       Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
       primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
-      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem().getStoreFiles(families[0]);
+      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
+          .getStoreFiles(families[0]);
       Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
 
       verifyData(secondaryRegion, 0, 1000, cq, families);
@@ -4992,7 +4992,8 @@ public class TestHRegion {
     }
   }
 
-  private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
+  private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws
+      IOException {
     putData(this.region, startRow, numRows, qf, families);
   }
 
@@ -5085,7 +5086,6 @@ public class TestHRegion {
 
   /**
    * Test that we get the expected flush results back
-   * @throws IOException
    */
   @Test
   public void testFlushResult() throws IOException {
@@ -5138,11 +5138,6 @@ public class TestHRegion {
   }
 
   /**
-   * @param tableName
-   * @param callingMethod
-   * @param conf
-   * @param families
-   * @throws IOException
    * @return A region on which you must call
    *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    */
@@ -5152,12 +5147,6 @@ public class TestHRegion {
   }
 
   /**
-   * @param tableName
-   * @param callingMethod
-   * @param conf
-   * @param isReadOnly
-   * @param families
-   * @throws IOException
    * @return A region on which you must call
    *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    */
@@ -5177,14 +5166,6 @@ public class TestHRegion {
   }
 
   /**
-   * @param tableName
-   * @param startKey
-   * @param stopKey
-   * @param callingMethod
-   * @param conf
-   * @param isReadOnly
-   * @param families
-   * @throws IOException
    * @return A region on which you must call
    *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    */
@@ -5676,7 +5657,8 @@ public class TestHRegion {
       currRow.clear();
       hasNext = scanner.next(currRow);
       assertEquals(2, currRow.size());
-      assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow.get(0).getRowLength(), row4, 0,
+      assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
+          currRow.get(0).getRowLength(), row4, 0,
         row4.length));
       assertTrue(hasNext);
       // 2. scan out "row3" (2 kv)
@@ -6088,7 +6070,7 @@ public class TestHRegion {
   public void testOpenRegionWrittenToWALForLogReplay() throws Exception {
     // similar to the above test but with distributed log replay
     final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWALForLogReplay",
-      100, 42);
+        100, 42);
     final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
 
     HTableDescriptor htd

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
index 80333e8..b5e9798 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
@@ -18,12 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -36,6 +30,13 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Test the {@link MemStoreChunkPool} class
  */
@@ -47,7 +48,7 @@ public class TestMemStoreChunkPool {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    conf.setBoolean(DefaultMemStore.USEMSLAB_KEY, true);
+    conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true);
     conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
     chunkPoolDisabledBeforeTest = MemStoreChunkPool.chunkPoolDisabled;
     MemStoreChunkPool.chunkPoolDisabled = false;
@@ -116,13 +117,13 @@ public class TestMemStoreChunkPool {
 
     // Creating a snapshot
     MemStoreSnapshot snapshot = memstore.snapshot();
-    assertEquals(3, memstore.snapshot.size());
+    assertEquals(3, memstore.getSnapshot().getCellsCount());
 
     // Adding value to "new" memstore
-    assertEquals(0, memstore.cellSet.size());
+    assertEquals(0, memstore.getActive().getCellsCount());
     memstore.add(new KeyValue(row, fam, qf4, val));
     memstore.add(new KeyValue(row, fam, qf5, val));
-    assertEquals(2, memstore.cellSet.size());
+    assertEquals(2, memstore.getActive().getCellsCount());
     memstore.clearSnapshot(snapshot.getId());
 
     int chunkCount = chunkPool.getPoolSize();
@@ -132,7 +133,7 @@ public class TestMemStoreChunkPool {
 
   @Test
   public void testPuttingBackChunksWithOpeningScanner()
-      throws UnexpectedStateException {
+      throws IOException {
     byte[] row = Bytes.toBytes("testrow");
     byte[] fam = Bytes.toBytes("testfamily");
     byte[] qf1 = Bytes.toBytes("testqualifier1");
@@ -153,13 +154,13 @@ public class TestMemStoreChunkPool {
 
     // Creating a snapshot
     MemStoreSnapshot snapshot = memstore.snapshot();
-    assertEquals(3, memstore.snapshot.size());
+    assertEquals(3, memstore.getSnapshot().getCellsCount());
 
     // Adding value to "new" memstore
-    assertEquals(0, memstore.cellSet.size());
+    assertEquals(0, memstore.getActive().getCellsCount());
     memstore.add(new KeyValue(row, fam, qf4, val));
     memstore.add(new KeyValue(row, fam, qf5, val));
-    assertEquals(2, memstore.cellSet.size());
+    assertEquals(2, memstore.getActive().getCellsCount());
 
     // opening scanner before clear the snapshot
     List<KeyValueScanner> scanners = memstore.getScanners(0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/25dfc112/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index 354ea2d..0a67ff8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -27,6 +27,7 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+import com.google.common.collect.Lists;
 import java.io.IOException;
 import java.lang.ref.SoftReference;
 import java.security.PrivilegedExceptionAction;
@@ -92,8 +93,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
-import com.google.common.collect.Lists;
-
 /**
  * Test class for the Store
  */
@@ -555,7 +554,7 @@ public class TestStore {
     this.store.snapshot();
     flushStore(store, id++);
     Assert.assertEquals(storeFilessize, this.store.getStorefiles().size());
-    Assert.assertEquals(0, ((DefaultMemStore)this.store.memstore).cellSet.size());
+    Assert.assertEquals(0, ((AbstractMemStore)this.store.memstore).getActive().getCellsCount());
   }
 
   private void assertCheck() {
@@ -600,7 +599,7 @@ public class TestStore {
     flushStore(store, id++);
     Assert.assertEquals(1, this.store.getStorefiles().size());
     // from the one we inserted up there, and a new one
-    Assert.assertEquals(2, ((DefaultMemStore)this.store.memstore).cellSet.size());
+    Assert.assertEquals(2, ((AbstractMemStore)this.store.memstore).getActive().getCellsCount());
 
     // how many key/values for this row are there?
     Get get = new Get(row);
@@ -669,7 +668,7 @@ public class TestStore {
     }
 
     long computedSize=0;
-    for (Cell cell : ((DefaultMemStore)this.store.memstore).cellSet) {
+    for (Cell cell : ((AbstractMemStore)this.store.memstore).getActive().getCellSet()) {
       long kvsize = DefaultMemStore.heapSizeChange(cell, true);
       //System.out.println(kv + " size= " + kvsize + " kvsize= " + kv.heapSize());
       computedSize += kvsize;
@@ -701,7 +700,7 @@ public class TestStore {
     // then flush.
     flushStore(store, id++);
     Assert.assertEquals(1, this.store.getStorefiles().size());
-    Assert.assertEquals(1, ((DefaultMemStore)this.store.memstore).cellSet.size());
+    Assert.assertEquals(1, ((AbstractMemStore)this.store.memstore).getActive().getCellsCount());
 
     // now increment again:
     newValue += 1;