Posted to commits@hbase.apache.org by zh...@apache.org on 2017/09/25 01:58:48 UTC

[06/11] hbase git commit: HBASE-18825 Use HStoreFile instead of StoreFile in our own code base and remove unnecessary methods in StoreFile interface
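
The change repeated throughout this patch is mechanical: server-internal code that was written against the public StoreFile interface now works with the concrete HStoreFile class, which in turn lets reader-level methods be dropped from the StoreFile interface. A minimal sketch of the resulting shape, modeled on the getTotalFileSize() helper in the StripeCompactionPolicy hunk below (the class and method names here are illustrative, not part of the patch):

    import java.util.Collection;

    import org.apache.hadoop.hbase.regionserver.HStoreFile;

    final class StoreFileSizes {
      // Summing file sizes needs reader access; with HStoreFile as the parameter
      // type, getReader() no longer has to be exposed on the StoreFile interface.
      static long totalSize(Collection<HStoreFile> files) {
        long total = 0;
        for (HStoreFile sf : files) {
          total += sf.getReader().length();
        }
        return total;
      }
    }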

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
index e677ed1..9dc8246 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
@@ -30,9 +30,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
 import org.apache.hadoop.hbase.regionserver.StripeStoreFlusher;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConcatenatedLists;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 
@@ -63,12 +63,12 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     stripePolicy = new ExploringCompactionPolicy(conf, storeConfigInfo);
   }
 
-  public List<StoreFile> preSelectFilesForCoprocessor(StripeInformationProvider si,
-      List<StoreFile> filesCompacting) {
+  public List<HStoreFile> preSelectFilesForCoprocessor(StripeInformationProvider si,
+      List<HStoreFile> filesCompacting) {
     // We sincerely hope nobody is messing with us with their coprocessors.
     // If they do, they are very likely to shoot themselves in the foot.
     // We'll just exclude all the filesCompacting from the list.
-    ArrayList<StoreFile> candidateFiles = new ArrayList<>(si.getStorefiles());
+    ArrayList<HStoreFile> candidateFiles = new ArrayList<>(si.getStorefiles());
     candidateFiles.removeAll(filesCompacting);
     return candidateFiles;
   }
@@ -102,7 +102,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
   }
 
   public StripeCompactionRequest selectCompaction(StripeInformationProvider si,
-      List<StoreFile> filesCompacting, boolean isOffpeak) throws IOException {
+      List<HStoreFile> filesCompacting, boolean isOffpeak) throws IOException {
     // TODO: first cut - no parallel compactions. To have more fine grained control we
     //       probably need structure more sophisticated than a list.
     if (!filesCompacting.isEmpty()) {
@@ -116,7 +116,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
 
     // This can happen due to region split. We can skip it later; for now preserve
     // compact-all-things behavior.
-    Collection<StoreFile> allFiles = si.getStorefiles();
+    Collection<HStoreFile> allFiles = si.getStorefiles();
     if (StoreUtils.hasReferences(allFiles)) {
       LOG.debug("There are references in the store; compacting all files");
       long targetKvs = estimateTargetKvs(allFiles, config.getInitialCount()).getFirst();
@@ -127,7 +127,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     }
 
     int stripeCount = si.getStripeCount();
-    List<StoreFile> l0Files = si.getLevel0Files();
+    List<HStoreFile> l0Files = si.getLevel0Files();
 
     // See if we need to make new stripes.
     boolean shouldCompactL0 = (this.config.getLevel0MinFiles() <= l0Files.size());
@@ -157,7 +157,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     return selectSingleStripeCompaction(si, false, canDropDeletesNoL0, isOffpeak);
   }
 
-  public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
+  public boolean needsCompactions(StripeInformationProvider si, List<HStoreFile> filesCompacting) {
     // Approximation on whether we need compaction.
     return filesCompacting.isEmpty()
         && (StoreUtils.hasReferences(si.getStorefiles())
@@ -166,7 +166,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
   }
 
   @Override
-  public boolean shouldPerformMajorCompaction(Collection<StoreFile> filesToCompact)
+  public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
     throws IOException {
     return false; // there's never a major compaction!
   }
@@ -182,7 +182,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
    */
   protected boolean needsSingleStripeCompaction(StripeInformationProvider si) {
     int minFiles = this.config.getStripeCompactMinFiles();
-    for (List<StoreFile> stripe : si.getStripes()) {
+    for (List<HStoreFile> stripe : si.getStripes()) {
       if (stripe.size() >= minFiles) return true;
     }
     return false;
@@ -190,20 +190,20 @@ public class StripeCompactionPolicy extends CompactionPolicy {
 
   protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformationProvider si,
       boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException {
-    ArrayList<ImmutableList<StoreFile>> stripes = si.getStripes();
+    ArrayList<ImmutableList<HStoreFile>> stripes = si.getStripes();
 
     int bqIndex = -1;
-    List<StoreFile> bqSelection = null;
+    List<HStoreFile> bqSelection = null;
     int stripeCount = stripes.size();
     long bqTotalSize = -1;
     for (int i = 0; i < stripeCount; ++i) {
       // If we want to compact L0 to drop deletes, we only want whole-stripe compactions.
       // So, pass includeL0 as 2nd parameter to indicate that.
-      List<StoreFile> selection = selectSimpleCompaction(stripes.get(i),
+      List<HStoreFile> selection = selectSimpleCompaction(stripes.get(i),
           !canDropDeletesWithoutL0 && includeL0, isOffpeak);
       if (selection.isEmpty()) continue;
       long size = 0;
-      for (StoreFile sf : selection) {
+      for (HStoreFile sf : selection) {
         size += sf.getReader().length();
       }
       if (bqSelection == null || selection.size() > bqSelection.size() ||
@@ -217,7 +217,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       LOG.debug("No good compaction is possible in any stripe");
       return null;
     }
-    List<StoreFile> filesToCompact = new ArrayList<>(bqSelection);
+    List<HStoreFile> filesToCompact = new ArrayList<>(bqSelection);
     // See if we can, and need to, split this stripe.
     int targetCount = 1;
     long targetKvs = Long.MAX_VALUE;
@@ -244,9 +244,9 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     StripeCompactionRequest req;
     if (includeL0) {
       assert hasAllFiles;
-      List<StoreFile> l0Files = si.getLevel0Files();
+      List<HStoreFile> l0Files = si.getLevel0Files();
       LOG.debug("Adding " + l0Files.size() + " files to compaction to be able to drop deletes");
-      ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
+      ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
       sfs.addSublist(filesToCompact);
       sfs.addSublist(l0Files);
       req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries());
@@ -267,33 +267,16 @@ public class StripeCompactionPolicy extends CompactionPolicy {
    * @param allFilesOnly Whether a compaction of all-or-none files is needed.
    * @return The resulting selection.
    */
-  private List<StoreFile> selectSimpleCompaction(
-      List<StoreFile> sfs, boolean allFilesOnly, boolean isOffpeak) {
+  private List<HStoreFile> selectSimpleCompaction(
+      List<HStoreFile> sfs, boolean allFilesOnly, boolean isOffpeak) {
     int minFilesLocal = Math.max(
         allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles());
     int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal);
     return stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal);
   }
 
-  /**
-   * Selects the compaction that compacts all files (to be removed later).
-   * @param si StoreFileManager.
-   * @param targetStripeCount Target stripe count.
-   * @param targetSize Target stripe size.
-   * @return The compaction.
-   */
-  private StripeCompactionRequest selectCompactionOfAllFiles(StripeInformationProvider si,
-      int targetStripeCount, long targetSize) {
-    Collection<StoreFile> allFiles = si.getStorefiles();
-    SplitStripeCompactionRequest request = new SplitStripeCompactionRequest(
-        allFiles, OPEN_KEY, OPEN_KEY, targetStripeCount, targetSize);
-    request.setMajorRangeFull();
-    LOG.debug("Selecting a compaction that includes all " + allFiles.size() + " files");
-    return request;
-  }
-
   private StripeCompactionRequest selectNewStripesCompaction(StripeInformationProvider si) {
-    List<StoreFile> l0Files = si.getLevel0Files();
+    List<HStoreFile> l0Files = si.getLevel0Files();
     Pair<Long, Integer> kvsAndCount = estimateTargetKvs(l0Files, config.getInitialCount());
     LOG.debug("Creating " + kvsAndCount.getSecond() + " initial stripes with "
         + kvsAndCount.getFirst() + " kvs each via L0 compaction of " + l0Files.size() + " files");
@@ -312,9 +295,9 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     long timestampCutoff = EnvironmentEdgeManager.currentTime() - cfTtl;
     // Merge the longest sequence of stripes where all files have expired, if any.
     int start = -1, bestStart = -1, length = 0, bestLength = 0;
-    ArrayList<ImmutableList<StoreFile>> stripes = si.getStripes();
+    ArrayList<ImmutableList<HStoreFile>> stripes = si.getStripes();
     OUTER: for (int i = 0; i < stripes.size(); ++i) {
-      for (StoreFile storeFile : stripes.get(i)) {
+      for (HStoreFile storeFile : stripes.get(i)) {
         if (storeFile.getReader().getMaxTimestamp() < timestampCutoff) continue;
         // Found non-expired file, this stripe has to stay.
         if (length > bestLength) {
@@ -345,7 +328,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     }
     LOG.debug("Merging " + bestLength + " stripes to delete expired store files");
     int endIndex = bestStart + bestLength - 1;
-    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
+    ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
     sfs.addAllSublists(stripes.subList(bestStart, endIndex + 1));
     SplitStripeCompactionRequest result = new SplitStripeCompactionRequest(sfs,
         si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE);
@@ -355,23 +338,23 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     return result;
   }
 
-  private static long getTotalKvCount(final Collection<StoreFile> candidates) {
+  private static long getTotalKvCount(final Collection<HStoreFile> candidates) {
     long totalSize = 0;
-    for (StoreFile storeFile : candidates) {
+    for (HStoreFile storeFile : candidates) {
       totalSize += storeFile.getReader().getEntries();
     }
     return totalSize;
   }
 
-  public static long getTotalFileSize(final Collection<StoreFile> candidates) {
+  public static long getTotalFileSize(final Collection<HStoreFile> candidates) {
     long totalSize = 0;
-    for (StoreFile storeFile : candidates) {
+    for (HStoreFile storeFile : candidates) {
       totalSize += storeFile.getReader().length();
     }
     return totalSize;
   }
 
-  private Pair<Long, Integer> estimateTargetKvs(Collection<StoreFile> files, double splitCount) {
+  private Pair<Long, Integer> estimateTargetKvs(Collection<HStoreFile> files, double splitCount) {
     // If the size is larger than what we target, we don't want to split into proportionally
     // larger parts and then have to split again very soon. So, we will increase the multiplier
     // by one until we get small enough parts. E.g. 5Gb stripe that should have been split into
@@ -452,7 +435,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       this.targetBoundaries = targetBoundaries;
     }
 
-    public BoundaryStripeCompactionRequest(Collection<StoreFile> files,
+    public BoundaryStripeCompactionRequest(Collection<HStoreFile> files,
         List<byte[]> targetBoundaries) {
       this(new CompactionRequest(files), targetBoundaries);
     }
@@ -494,16 +477,11 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     }
 
     public SplitStripeCompactionRequest(
-        CompactionRequest request, byte[] startRow, byte[] endRow, long targetKvs) {
-      this(request, startRow, endRow, Integer.MAX_VALUE, targetKvs);
-    }
-
-    public SplitStripeCompactionRequest(
-        Collection<StoreFile> files, byte[] startRow, byte[] endRow, long targetKvs) {
+        Collection<HStoreFile> files, byte[] startRow, byte[] endRow, long targetKvs) {
       this(files, startRow, endRow, Integer.MAX_VALUE, targetKvs);
     }
 
-    public SplitStripeCompactionRequest(Collection<StoreFile> files,
+    public SplitStripeCompactionRequest(Collection<HStoreFile> files,
         byte[] startRow, byte[] endRow, int targetCount, long targetKvs) {
       this(new CompactionRequest(files), startRow, endRow, targetCount, targetKvs);
     }
@@ -524,7 +502,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
 
   /** The information about stripes that the policy needs to do its stuff */
   public static interface StripeInformationProvider {
-    public Collection<StoreFile> getStorefiles();
+    public Collection<HStoreFile> getStorefiles();
 
     /**
      * Gets the start row for a given stripe.
@@ -543,7 +521,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     /**
      * @return Level 0 files.
      */
-    public List<StoreFile> getLevel0Files();
+    public List<HStoreFile> getLevel0Files();
 
     /**
      * @return All stripe boundaries; including the open ones on both ends.
@@ -553,7 +531,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     /**
      * @return The stripes.
      */
-    public ArrayList<ImmutableList<StoreFile>> getStripes();
+    public ArrayList<ImmutableList<HStoreFile>> getStripes();
 
     /**
      * @return Stripe count.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
index c75b24c..f552f96 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
@@ -24,15 +24,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * This is the placeholder for stripe compactor. The implementation, as well as the proper javadoc,
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class StripeCompactor extends AbstractMultiOutputCompactor<StripeMultiFileWriter> {
   private static final Log LOG = LogFactory.getLog(StripeCompactor.class);
 
-  public StripeCompactor(Configuration conf, Store store) {
+  public StripeCompactor(Configuration conf, HStore store) {
     super(conf, store);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 615d492..b371b3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -37,16 +37,21 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
@@ -54,10 +59,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 
 /**
  * Utility class to help read/write the Snapshot Manifest.
@@ -228,20 +229,20 @@ public final class SnapshotManifest {
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");
 
-    for (Store store : region.getStores()) {
+    for (HStore store : region.getStores()) {
       // 2.1. build the snapshot reference for the store
       Object familyData = visitor.familyOpen(regionData,
           store.getColumnFamilyDescriptor().getName());
       monitor.rethrowException();
 
-      List<StoreFile> storeFiles = new ArrayList<>(store.getStorefiles());
+      List<HStoreFile> storeFiles = new ArrayList<>(store.getStorefiles());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
       }
 
       // 2.2. iterate through all the store's files and create "references".
       for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
-        StoreFile storeFile = storeFiles.get(i);
+        HStoreFile storeFile = storeFiles.get(i);
         monitor.rethrowException();
 
         // create "reference" to this store file.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
index 08a1512..c457e22 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
@@ -34,6 +34,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -702,7 +703,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
       Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table table,
       final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
     Path hfilePath = item.getFilePath();
-    byte[] first, last;
+    Optional<byte[]> first, last;
     try (HFile.Reader hfr = HFile.createReader(hfilePath.getFileSystem(getConf()), hfilePath,
       new CacheConfig(getConf()), true, getConf())) {
       hfr.loadFileInfo();
@@ -713,19 +714,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
       return new Pair<>(null, hfilePath.getName());
     }
 
-    LOG.info("Trying to load hfile=" + hfilePath + " first=" + Bytes.toStringBinary(first) +
-        " last=" + Bytes.toStringBinary(last));
-    if (first == null || last == null) {
-      assert first == null && last == null;
+    LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) +
+        " last=" + last.map(Bytes::toStringBinary));
+    if (!first.isPresent() || !last.isPresent()) {
+      assert !first.isPresent() && !last.isPresent();
       // TODO what if this is due to a bad HFile?
       LOG.info("hfile " + hfilePath + " has no entries, skipping");
       return null;
     }
-    if (Bytes.compareTo(first, last) > 0) {
-      throw new IllegalArgumentException(
-          "Invalid range: " + Bytes.toStringBinary(first) + " > " + Bytes.toStringBinary(last));
+    if (Bytes.compareTo(first.get(), last.get()) > 0) {
+      throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) +
+          " > " + Bytes.toStringBinary(last.get()));
     }
-    int idx = Arrays.binarySearch(startEndKeys.getFirst(), first, Bytes.BYTES_COMPARATOR);
+    int idx = Arrays.binarySearch(startEndKeys.getFirst(), first.get(), Bytes.BYTES_COMPARATOR);
     if (idx < 0) {
       // not on boundary, returns -(insertion index). Calculate region it
       // would be in.
@@ -753,7 +754,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
           "Please use hbck tool to fix it first.");
     }
 
-    boolean lastKeyInRange = Bytes.compareTo(last, startEndKeys.getSecond()[idx]) < 0 ||
+    boolean lastKeyInRange = Bytes.compareTo(last.get(), startEndKeys.getSecond()[idx]) < 0 ||
         Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);
     if (!lastKeyInRange) {
       List<LoadQueueItem> lqis = splitStoreFile(item, table,
@@ -834,8 +835,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
                 " for family " + builder.getNameAsString());
           }
           reader.loadFileInfo();
-          byte[] first = reader.getFirstRowKey();
-          byte[] last = reader.getLastRowKey();
+          byte[] first = reader.getFirstRowKey().get();
+          byte[] last = reader.getLastRowKey().get();
 
           LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" +
               Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
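
The hunk above also reflects a related API change: the HFile reader's getFirstRowKey() and getLastRowKey() now return Optional<byte[]> rather than possibly-null arrays, so the empty-file case becomes a presence check instead of a null check. A minimal sketch of a caller under that assumption (the class and method names are illustrative):

    import java.io.IOException;
    import java.util.Optional;

    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    final class HFileRangeCheck {
      // Mirrors the LoadIncrementalHFiles logic above: an HFile with no entries
      // reports empty Optionals; otherwise first must not sort after last.
      static boolean hasValidRange(HFile.Reader reader) throws IOException {
        Optional<byte[]> first = reader.getFirstRowKey();
        Optional<byte[]> last = reader.getLastRowKey();
        if (!first.isPresent() || !last.isPresent()) {
          return false; // empty file, caller skips it
        }
        if (Bytes.compareTo(first.get(), last.get()) > 0) {
          throw new IllegalArgumentException("Invalid range: " +
              Bytes.toStringBinary(first.get()) + " > " + Bytes.toStringBinary(last.get()));
        }
        return true;
      }
    }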

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
index d4c6254..e8ee3ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.util;
 
 import java.io.DataInput;
@@ -26,17 +24,17 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
 import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterBase;
 import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Handles Bloom filter initialization based on configuration and serialized
- * metadata in the reader and writer of {@link org.apache.hadoop.hbase.regionserver.StoreFile}.
+ * Handles Bloom filter initialization based on configuration and serialized metadata in the reader
+ * and writer of {@link org.apache.hadoop.hbase.regionserver.HStoreFile}.
  */
 @InterfaceAudience.Private
 public final class BloomFilterFactory {
@@ -155,7 +153,7 @@ public final class BloomFilterFactory {
 
   /**
    * Creates a new general (Row or RowCol) Bloom filter at the time of
-   * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
+   * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
    *
    * @param conf
    * @param cacheConf
@@ -203,7 +201,7 @@ public final class BloomFilterFactory {
 
   /**
    * Creates a new Delete Family Bloom filter at the time of
-   * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
+   * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
    * @param conf
    * @param cacheConf
    * @param maxKeys an estimate of the number of keys we expect to insert.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 53160e6..0c95e7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -50,6 +50,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.SortedSet;
@@ -858,13 +859,13 @@ public class HBaseFsck extends Configured implements Closeable {
                 new CacheConfig(getConf()), true, getConf());
               if ((reader.getFirstKey() != null)
                   && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
-                      ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey()).getKey()) > 0))) {
-                storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey()).getKey();
+                      ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) {
+                storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey();
               }
               if ((reader.getLastKey() != null)
                   && ((storeLastKey == null) || (comparator.compare(storeLastKey,
-                      ((KeyValue.KeyOnlyKeyValue)reader.getLastKey()).getKey())) < 0)) {
-                storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey()).getKey();
+                      ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) {
+                storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey();
               }
               reader.close();
             }
@@ -961,10 +962,10 @@ public class HBaseFsck extends Configured implements Closeable {
           CacheConfig cacheConf = new CacheConfig(getConf());
           hf = HFile.createReader(fs, hfile.getPath(), cacheConf, true, getConf());
           hf.loadFileInfo();
-          Cell startKv = hf.getFirstKey();
-          start = CellUtil.cloneRow(startKv);
-          Cell endKv = hf.getLastKey();
-          end = CellUtil.cloneRow(endKv);
+          Optional<Cell> startKv = hf.getFirstKey();
+          start = CellUtil.cloneRow(startKv.get());
+          Optional<Cell> endKv = hf.getLastKey();
+          end = CellUtil.cloneRow(endKv.get());
         } catch (IOException ioe) {
           LOG.warn("Problem reading orphan file " + hfile + ", skipping");
           continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
index 34fd6f7..369aed7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY;
+
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Handles ROW bloom related context. It works with both ByteBufferedCell and byte[] backed cells
@@ -39,7 +40,7 @@ public class RowBloomContext extends BloomContext {
   public void addLastBloomKey(Writer writer) throws IOException {
     if (this.getLastCell() != null) {
       byte[] key = CellUtil.copyRow(this.getLastCell());
-      writer.appendFileInfo(StoreFile.LAST_BLOOM_KEY, key);
+      writer.appendFileInfo(LAST_BLOOM_KEY, key);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java
index 9ead570..90cbcb0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY;
+
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Handles ROWCOL bloom related context. It can work with both BytebufferdCells
@@ -43,7 +44,7 @@ public class RowColBloomContext extends BloomContext {
       Cell firstOnRow = CellUtil.createFirstOnRowCol(this.getLastCell());
       // This copy happens only once when the writer is closed
       byte[] key = CellUtil.getCellKeySerializedAsKeyValueKey(firstOnRow);
-      writer.appendFileInfo(StoreFile.LAST_BLOOM_KEY, key);
+      writer.appendFileInfo(LAST_BLOOM_KEY, key);
     }
   }
 

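The two bloom-context hunks above change only where the constant comes from: LAST_BLOOM_KEY, the file-info entry that records the last key added to the bloom filter, is now statically imported from HStoreFile instead of being referenced through the StoreFile interface. A small illustrative fragment under that assumption (the wrapper class and method are hypothetical):

    import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY;

    import java.io.IOException;

    import org.apache.hadoop.hbase.io.hfile.HFile.Writer;

    final class BloomKeyInfo {
      // Records the serialized last bloom key in the HFile's file-info block.
      static void recordLastBloomKey(Writer writer, byte[] lastKey) throws IOException {
        writer.appendFileInfo(LAST_BLOOM_KEY, lastKey);
      }
    }
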
http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
index 04aafa7..4b25635 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
@@ -95,7 +95,7 @@
      List<? extends Store> stores = region.getStores();
      for (Store store : stores) {
        String cf = store.getColumnFamilyName();
-       Collection<StoreFile> storeFiles = store.getStorefiles(); %>
+       Collection<? extends StoreFile> storeFiles = store.getStorefiles(); %>
 
        <h3>Column Family: <%= cf %></h2>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index 85f65e8..6e7bf7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
@@ -41,16 +39,14 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -59,6 +55,10 @@ import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+
 /**
  * Test for the case where a regionserver going down has enough cycles to do damage to regions that
 * have actually been assigned elsewhere.
@@ -201,7 +201,7 @@ public class TestIOFencing {
     }
 
     @Override
-    protected void completeCompaction(Collection<StoreFile> compactedFiles) throws IOException {
+    protected void completeCompaction(Collection<HStoreFile> compactedFiles) throws IOException {
       try {
         r.compactionsWaiting.countDown();
         r.compactionsBlocked.await();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index dfdd11e..9c100a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -49,12 +49,10 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.master.LoadBalancer;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -69,6 +67,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
+
 /**
  * Class to test HBaseAdmin.
  * Spins up the minicluster once at test start and then takes it down afterward.
@@ -1320,8 +1321,8 @@ public class TestAdmin1 {
 
       List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
       for (HRegion r : regions) {
-        Store store = r.getStore(Bytes.toBytes(fn));
-        for (StoreFile sf : store.getStorefiles()) {
+        HStore store = r.getStore(Bytes.toBytes(fn));
+        for (HStoreFile sf : store.getStorefiles()) {
           assertTrue(sf.toString().contains(fn));
           assertTrue("Column family " + fn + " should have 3 copies",
             FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) == (sf
@@ -1329,7 +1330,7 @@ public class TestAdmin1 {
         }
 
         store = r.getStore(Bytes.toBytes(fn1));
-        for (StoreFile sf : store.getStorefiles()) {
+        for (HStoreFile sf : store.getStorefiles()) {
           assertTrue(sf.toString().contains(fn1));
           assertTrue("Column family " + fn1 + " should have only 1 copy", 1 == sf.getFileInfo()
               .getFileStatus().getReplication());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index ac404bb..061d097 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -253,14 +254,16 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
   }
 
   public static class CompactorRegionObserver implements RegionObserver {
+
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-        InternalScanner s, CompactionLifeCycleTracker request, long readPoint) throws IOException {
-      return createCompactorScanner(store, scanners, scanType, earliestPutTs);
+        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
+        long earliestPutTs, InternalScanner s, CompactionLifeCycleTracker request, long readPoint)
+        throws IOException {
+      return createCompactorScanner((HStore) store, scanners, scanType, earliestPutTs);
     }
 
-    private InternalScanner createCompactorScanner(Store store,
+    private InternalScanner createCompactorScanner(HStore store,
         List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs)
         throws IOException {
       return new CompactorStoreScanner(store, store.getScanInfo(), OptionalInt.empty(), scanners,
@@ -270,7 +273,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
 
   private static class CompactorStoreScanner extends StoreScanner {
 
-    public CompactorStoreScanner(Store store, ScanInfo scanInfo, OptionalInt maxVersions,
+    public CompactorStoreScanner(HStore store, ScanInfo scanInfo, OptionalInt maxVersions,
         List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
         long earliestPutTs) throws IOException {
       super(store, scanInfo, maxVersions, scanners, scanType, smallestReadPoint, earliestPutTs);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index ae4f8a8..aaddd34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -54,6 +54,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -69,7 +70,6 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateR
 import org.apache.hadoop.hbase.regionserver.DelegatingKeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -554,7 +555,7 @@ public class TestFromClientSide {
     }
 
     class MyStoreScanner extends StoreScanner {
-      public MyStoreScanner(Store store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns,
+      public MyStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns,
           long readPt) throws IOException {
         super(store, scanInfo, scan, columns, readPt);
       }
@@ -588,7 +589,7 @@ public class TestFromClientSide {
     public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
         Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s,
         final long readPt) throws IOException {
-      return new MyStoreScanner(store, store.getScanInfo(), scan, targetCols, readPt);
+      return new MyStoreScanner((HStore) store, store.getScanInfo(), scan, targetCols, readPt);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index 9938c18..ba1e222 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -19,6 +19,11 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -57,16 +62,12 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 
 /**
  * A sample region observer that tests the RegionObserver interface.
@@ -202,13 +203,13 @@ public class SimpleRegionObserver implements RegionObserver {
 
   @Override
   public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-      List<StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
+      List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
     ctPreCompactSelect.incrementAndGet();
   }
 
   @Override
   public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-      ImmutableList<StoreFile> selected, CompactionLifeCycleTracker tracker) {
+      ImmutableList<? extends StoreFile> selected, CompactionLifeCycleTracker tracker) {
     ctPostCompactSelect.incrementAndGet();
   }
 

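Note the widened hook signatures in the hunk above: the compaction-selection callbacks now take List<? extends StoreFile> and ImmutableList<? extends StoreFile>, so the region server can pass its internal HStoreFile lists to coprocessors without exposing the concrete class. A minimal sketch of an observer written against the widened signature, modeled on SimpleRegionObserver (the class name and loop body are illustrative):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;

    public class LoggingCompactionObserver implements RegionObserver {
      @Override
      public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
          List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
          throws IOException {
        // Read-only inspection through the public StoreFile interface is still enough here.
        for (StoreFile sf : candidates) {
          // e.g. look at the candidate before selection proceeds
        }
      }
    }
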
http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 9c06c3e..afeb763 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
@@ -116,7 +115,7 @@ public class TestRegionObserverScannerOpenHook {
         Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPt)
         throws IOException {
       scan.setFilter(new NoDataFilter());
-      return new StoreScanner(store, store.getScanInfo(), scan, targetCols, readPt);
+      return new StoreScanner((HStore) store, store.getScanInfo(), scan, targetCols, readPt);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
index 0fd3cdb..37d6b8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
@@ -96,7 +96,7 @@ public class TestHalfStoreFileReader {
 
     HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf);
     r.loadFileInfo();
-    Cell midKV = r.midkey();
+    Cell midKV = r.midKey().get();
     byte[] midkey = CellUtil.cloneRow(midKV);
 
     // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
@@ -155,7 +155,7 @@ public class TestHalfStoreFileReader {
 
     HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf);
     r.loadFileInfo();
-    Cell midKV = r.midkey();
+    Cell midKV = r.midKey().get();
     byte[] midkey = CellUtil.cloneRow(midKV);
 
     Reference bottom = new Reference(midkey, Reference.Range.bottom);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 62a7c48..13589fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -164,8 +164,8 @@ public class TestHFile  {
     w.close();
     Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
     r.loadFileInfo();
-    assertNull(r.getFirstKey());
-    assertNull(r.getLastKey());
+    assertFalse(r.getFirstKey().isPresent());
+    assertFalse(r.getLastKey().isPresent());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 3d1af90..82c0eca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -571,7 +571,7 @@ public class TestHFileBlockIndex {
    boolean hasArrayIndexOutOfBoundsException = false;
    try {
      // get the mid-key.
-     reader.midkey();
+     reader.midKey();
    } catch (ArrayIndexOutOfBoundsException e) {
      hasArrayIndexOutOfBoundsException = true;
    } finally {
@@ -649,8 +649,8 @@ public class TestHFileBlockIndex {
       assertEquals(expectedNumLevels,
           reader.getTrailer().getNumDataIndexLevels());
 
-      assertTrue(Bytes.equals(keys[0], ((KeyValue)reader.getFirstKey()).getKey()));
-      assertTrue(Bytes.equals(keys[NUM_KV - 1], ((KeyValue)reader.getLastKey()).getKey()));
+      assertTrue(Bytes.equals(keys[0], ((KeyValue)reader.getFirstKey().get()).getKey()));
+      assertTrue(Bytes.equals(keys[NUM_KV - 1], ((KeyValue)reader.getLastKey().get()).getKey()));
       LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1]));
 
       for (boolean pread : new boolean[] { false, true }) {
@@ -706,7 +706,7 @@ public class TestHFileBlockIndex {
       // Validate the mid-key.
       assertEquals(
           Bytes.toStringBinary(blockKeys.get((blockKeys.size() - 1) / 2)),
-          reader.midkey());
+          reader.midKey());
 
       assertEquals(UNCOMPRESSED_INDEX_SIZES[testI],
           reader.getTrailer().getUncompressedDataIndexSize());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index 5cc2580..5f5cb74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -186,8 +186,8 @@ public class TestHFileSeek extends TestCase {
     Reader reader = HFile.createReaderFromStream(path, fsdis,
         fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
     reader.loadFileInfo();
-    KeySampler kSampler = new KeySampler(rng, ((KeyValue) reader.getFirstKey()).getKey(),
-        ((KeyValue) reader.getLastKey()).getKey(), keyLenGen);
+    KeySampler kSampler = new KeySampler(rng, ((KeyValue) reader.getFirstKey().get()).getKey(),
+        ((KeyValue) reader.getLastKey().get()).getKey(), keyLenGen);
     HFileScanner scanner = reader.getScanner(false, USE_PREAD);
     BytesWritable key = new BytesWritable();
     timer.reset();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index 12aed50..8f0c5d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -723,8 +723,8 @@ public class TestMobCompactor {
 
     @Override
     public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-        List<StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
-
+        List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
+        throws IOException {
       int count = candidates.size();
       if (count >= 2) {
         for (int i = 0; i < count - 2; i++) {
@@ -815,7 +815,7 @@ public class TestMobCompactor {
       Assert.assertTrue(hasFiles);
       Path path = files[0].getPath();
       CacheConfig cacheConf = new CacheConfig(conf);
-      StoreFile sf = new HStoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf,
+      HStoreFile sf = new HStoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf,
         BloomType.NONE, true);
       sf.initReader();
       HFile.Reader reader = sf.getReader().getHFileReader();
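
The widened candidates parameter above (List<? extends StoreFile>) lets a coprocessor inspect and remove candidate files but no longer lets it insert StoreFile implementations of its own. Below is a minimal sketch of an observer compiled against the new hook signature, assuming the 2.0-style RegionObserver interface with default methods (on branches where the callbacks are abstract, extend BaseRegionObserver instead); the class name LoggingCompactionObserver is illustrative:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;

    public class LoggingCompactionObserver implements RegionObserver {
      @Override
      public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
          List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
          throws IOException {
        // Reading and removing entries is allowed; candidates.add(...) no longer compiles
        // against the wildcard-bounded list.
        for (StoreFile sf : candidates) {
          System.out.println("compaction candidate: " + sf.getPath());
        }
      }
    }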

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
index b8e1204..6681a96 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.mob.compactions;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
@@ -81,7 +81,6 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -494,12 +493,12 @@ public class TestPartitionedMobCompactor {
         PartitionedMobCompactionRequest request = select(files, isForceAllFiles);
 
         // Make sure that there is no del Partitions
-        Assert.assertTrue(request.getDelPartitions().size() == 0);
+        assertTrue(request.getDelPartitions().size() == 0);
 
         // Make sure that when there is no startKey/endKey for partition.
         for (CompactionPartition p : request.getCompactionPartitions()) {
-          Assert.assertTrue(p.getStartKey() == null);
-          Assert.assertTrue(p.getEndKey() == null);
+          assertTrue(p.getStartKey() == null);
+          assertTrue(p.getEndKey() == null);
         }
         return null;
       }
@@ -530,18 +529,18 @@ public class TestPartitionedMobCompactor {
       }
       PartitionedMobCompactionRequest request = select(files, isForceAllFiles);
 
-      Assert.assertTrue(request.getDelPartitions().size() == delPartitionSize);
+      assertTrue(request.getDelPartitions().size() == delPartitionSize);
       if (request.getDelPartitions().size() > 0) {
         for (CompactionPartition p : request.getCompactionPartitions()) {
-          Assert.assertTrue(p.getStartKey() != null);
-          Assert.assertTrue(p.getEndKey() != null);
+          assertTrue(p.getStartKey() != null);
+          assertTrue(p.getEndKey() != null);
         }
       }
 
       try {
         for (CompactionDelPartition delPartition : request.getDelPartitions()) {
           for (Path newDelPath : delPartition.listDelFiles()) {
-            StoreFile sf =
+            HStoreFile sf =
                 new HStoreFile(fs, newDelPath, conf, this.cacheConfig, BloomType.NONE, true);
             // pre-create reader of a del file to avoid race condition when opening the reader in
             // each partition.
@@ -553,11 +552,11 @@ public class TestPartitionedMobCompactor {
         // Make sure that CompactionDelPartitions does not overlap
         CompactionDelPartition prevDelP = null;
         for (CompactionDelPartition delP : request.getDelPartitions()) {
-          Assert.assertTrue(
+          assertTrue(
               Bytes.compareTo(delP.getId().getStartKey(), delP.getId().getEndKey()) <= 0);
 
           if (prevDelP != null) {
-            Assert.assertTrue(
+            assertTrue(
                 Bytes.compareTo(prevDelP.getId().getEndKey(), delP.getId().getStartKey()) < 0);
           }
         }
@@ -567,7 +566,7 @@ public class TestPartitionedMobCompactor {
         // Make sure that only del files within key range for a partition is included in compaction.
         // compact the mob files by partitions in parallel.
         for (CompactionPartition partition : request.getCompactionPartitions()) {
-          List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
+          List<HStoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
           if (!request.getDelPartitions().isEmpty()) {
             if (!((Bytes.compareTo(request.getDelPartitions().get(0).getId().getStartKey(),
                 partition.getEndKey()) > 0) || (Bytes.compareTo(
@@ -575,23 +574,23 @@ public class TestPartitionedMobCompactor {
                     .getEndKey(), partition.getStartKey()) < 0))) {
 
               if (delFiles.size() > 0) {
-                Assert.assertTrue(delFiles.size() == 1);
+                assertTrue(delFiles.size() == 1);
                 affectedPartitions += delFiles.size();
-                Assert.assertTrue(Bytes.compareTo(partition.getStartKey(),
-                    CellUtil.cloneRow(delFiles.get(0).getLastKey())) <= 0);
-                Assert.assertTrue(Bytes.compareTo(partition.getEndKey(),
-                    CellUtil.cloneRow(delFiles.get(delFiles.size() - 1).getFirstKey())) >= 0);
+                assertTrue(Bytes.compareTo(partition.getStartKey(),
+                  CellUtil.cloneRow(delFiles.get(0).getLastKey().get())) <= 0);
+                assertTrue(Bytes.compareTo(partition.getEndKey(),
+                  CellUtil.cloneRow(delFiles.get(delFiles.size() - 1).getFirstKey().get())) >= 0);
               }
             }
           }
         }
         // The del file is only included in one partition
-        Assert.assertTrue(affectedPartitions == PartitionsIncludeDelFiles);
+        assertTrue(affectedPartitions == PartitionsIncludeDelFiles);
       } finally {
         for (CompactionDelPartition delPartition : request.getDelPartitions()) {
-          for (StoreFile storeFile : delPartition.getStoreFiles()) {
+          for (HStoreFile storeFile : delPartition.getStoreFiles()) {
             try {
-              storeFile.closeReader(true);
+              storeFile.closeStoreFile(true);
             } catch (IOException e) {
               LOG.warn("Failed to close the reader on store file " + storeFile.getPath(), e);
             }
@@ -679,19 +678,19 @@ public class TestPartitionedMobCompactor {
         // Make sure that when there is no del files, there will be no startKey/endKey for partition.
         if (request.getDelPartitions().size() == 0) {
           for (CompactionPartition p : request.getCompactionPartitions()) {
-            Assert.assertTrue(p.getStartKey() == null);
-            Assert.assertTrue(p.getEndKey() == null);
+            assertTrue(p.getStartKey() == null);
+            assertTrue(p.getEndKey() == null);
           }
         }
 
         // Make sure that CompactionDelPartitions does not overlap
         CompactionDelPartition prevDelP = null;
         for (CompactionDelPartition delP : request.getDelPartitions()) {
-          Assert.assertTrue(Bytes.compareTo(delP.getId().getStartKey(),
+          assertTrue(Bytes.compareTo(delP.getId().getStartKey(),
               delP.getId().getEndKey()) <= 0);
 
           if (prevDelP != null) {
-            Assert.assertTrue(Bytes.compareTo(prevDelP.getId().getEndKey(),
+            assertTrue(Bytes.compareTo(prevDelP.getId().getEndKey(),
                 delP.getId().getStartKey()) < 0);
           }
         }
@@ -699,25 +698,24 @@ public class TestPartitionedMobCompactor {
         // Make sure that only del files within key range for a partition is included in compaction.
         // compact the mob files by partitions in parallel.
         for (CompactionPartition partition : request.getCompactionPartitions()) {
-          List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
+          List<HStoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
           if (!request.getDelPartitions().isEmpty()) {
             if (!((Bytes.compareTo(request.getDelPartitions().get(0).getId().getStartKey(),
                 partition.getEndKey()) > 0) || (Bytes.compareTo(
                 request.getDelPartitions().get(request.getDelPartitions().size() - 1).getId()
                     .getEndKey(), partition.getStartKey()) < 0))) {
               if (delFiles.size() > 0) {
-                Assert.assertTrue(Bytes
-                    .compareTo(partition.getStartKey(), delFiles.get(0).getFirstKey().getRowArray())
-                    >= 0);
-                Assert.assertTrue(Bytes.compareTo(partition.getEndKey(),
-                    delFiles.get(delFiles.size() - 1).getLastKey().getRowArray()) <= 0);
+                assertTrue(Bytes.compareTo(partition.getStartKey(),
+                  delFiles.get(0).getFirstKey().get().getRowArray()) >= 0);
+                assertTrue(Bytes.compareTo(partition.getEndKey(),
+                  delFiles.get(delFiles.size() - 1).getLastKey().get().getRowArray()) <= 0);
               }
             }
           }
         }
 
         // assert the compaction type
-        Assert.assertEquals(type, request.type);
+        assertEquals(type, request.type);
         // assert get the right partitions
         compareCompactedPartitions(expected, request.compactionPartitions);
         // assert get the right del files
@@ -750,8 +748,8 @@ public class TestPartitionedMobCompactor {
         }
         List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
         // assert the del files are merged.
-        Assert.assertEquals(expectedFileCount, newDelPaths.size());
-        Assert.assertEquals(expectedCellCount, countDelCellsInDelFiles(newDelPaths));
+        assertEquals(expectedFileCount, newDelPaths.size());
+        assertEquals(expectedCellCount, countDelCellsInDelFiles(newDelPaths));
         return null;
       }
     };
@@ -784,9 +782,9 @@ public class TestPartitionedMobCompactor {
     }
     Collections.sort(expected);
     Collections.sort(actualKeys);
-    Assert.assertEquals(expected.size(), actualKeys.size());
+    assertEquals(expected.size(), actualKeys.size());
     for (int i = 0; i < expected.size(); i++) {
-      Assert.assertEquals(expected.get(i), actualKeys.get(i));
+      assertEquals(expected.get(i), actualKeys.get(i));
     }
   }
 
@@ -802,7 +800,7 @@ public class TestPartitionedMobCompactor {
       }
     }
     for (Path f : delFiles) {
-      Assert.assertTrue(delMap.containsKey(f));
+      assertTrue(delMap.containsKey(f));
     }
   }
 
@@ -874,10 +872,10 @@ public class TestPartitionedMobCompactor {
    * @return the cell size
    */
   private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
-    List<StoreFile> sfs = new ArrayList<>();
+    List<HStoreFile> sfs = new ArrayList<>();
     int size = 0;
     for (Path path : paths) {
-      StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+      HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
       sfs.add(sf);
     }
     List<KeyValueScanner> scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs,
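
The HStoreFile-typed locals in this test follow a simple open/use/close cycle: initReader() opens the underlying HFile, and closeStoreFile(boolean), the rename of closeReader(boolean) applied throughout this patch, releases it. A minimal sketch under those assumptions; the helper name countEntries is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;

    // Open a single HFile as an HStoreFile, read a summary value, and release it.
    static long countEntries(FileSystem fs, Path path, Configuration conf) throws IOException {
      CacheConfig cacheConf = new CacheConfig(conf);
      HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
      sf.initReader();                 // opens the underlying HFile reader
      try {
        return sf.getReader().getEntries();
      } finally {
        // closeStoreFile replaces the old closeReader call used before this change.
        sf.closeStoreFile(cacheConf.shouldEvictOnClose());
      }
    }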

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 68c5d19..86df39f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -38,16 +38,15 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.Predicate;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.HashMultimap;
@@ -435,10 +434,9 @@ public class SpaceQuotaHelperForTests {
     @Override
     public boolean evaluate() throws Exception {
       for (HRegion region : cluster.getRegions(tn)) {
-        for (Store store : region.getStores()) {
-          HStore hstore = (HStore) store;
-          Collection<StoreFile> files =
-              hstore.getStoreEngine().getStoreFileManager().getCompactedfiles();
+        for (HStore store : region.getStores()) {
+          Collection<HStoreFile> files =
+              store.getStoreEngine().getStoreFileManager().getCompactedfiles();
           if (null != files && !files.isEmpty()) {
             LOG.debug(region.getRegionInfo().getEncodedName() + " still has compacted files");
             return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
index b33b45d..58691c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
@@ -19,9 +19,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -32,9 +29,12 @@ import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequ
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 public class AbstractTestDateTieredCompactionPolicy extends TestCompactionPolicy {
 
-  protected ArrayList<StoreFile> sfCreate(long[] minTimestamps, long[] maxTimestamps, long[] sizes)
+  protected ArrayList<HStoreFile> sfCreate(long[] minTimestamps, long[] maxTimestamps, long[] sizes)
       throws IOException {
     ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
     EnvironmentEdgeManager.injectEdge(timeMachine);
@@ -45,17 +45,17 @@ public class AbstractTestDateTieredCompactionPolicy extends TestCompactionPolicy
       ageInDisk.add(0L);
     }
 
-    ArrayList<StoreFile> ret = Lists.newArrayList();
+    ArrayList<HStoreFile> ret = Lists.newArrayList();
     for (int i = 0; i < sizes.length; i++) {
-      MockStoreFile msf =
-          new MockStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false, i);
+      MockHStoreFile msf =
+          new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false, i);
       msf.setTimeRangeTracker(new TimeRangeTracker(minTimestamps[i], maxTimestamps[i]));
       ret.add(msf);
     }
     return ret;
   }
 
-  protected void compactEquals(long now, ArrayList<StoreFile> candidates, long[] expectedFileSizes,
+  protected void compactEquals(long now, ArrayList<HStoreFile> candidates, long[] expectedFileSizes,
       long[] expectedBoundaries, boolean isMajor, boolean toCompact) throws IOException {
     ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
     EnvironmentEdgeManager.injectEdge(timeMachine);
@@ -64,17 +64,17 @@ public class AbstractTestDateTieredCompactionPolicy extends TestCompactionPolicy
     DateTieredCompactionPolicy policy =
         (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy();
     if (isMajor) {
-      for (StoreFile file : candidates) {
-        ((MockStoreFile) file).setIsMajor(true);
+      for (HStoreFile file : candidates) {
+        ((MockHStoreFile) file).setIsMajor(true);
       }
       assertEquals(toCompact, policy.shouldPerformMajorCompaction(candidates));
       request = (DateTieredCompactionRequest) policy.selectMajorCompaction(candidates);
     } else {
-      assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.<StoreFile> of()));
+      assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.of()));
       request =
           (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false);
     }
-    List<StoreFile> actual = Lists.newArrayList(request.getFiles());
+    List<HStoreFile> actual = Lists.newArrayList(request.getFiles());
     assertEquals(Arrays.toString(expectedFileSizes), Arrays.toString(getSizes(actual)));
     assertEquals(Arrays.toString(expectedBoundaries),
       Arrays.toString(request.getBoundaries().toArray()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index 2635e2d..5f85826 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -592,7 +592,7 @@ public class DataBlockEncodingTool {
     Path path = new Path(hfilePath);
     CacheConfig cacheConf = new CacheConfig(conf);
     FileSystem fs = FileSystem.get(conf);
-    StoreFile hsf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    HStoreFile hsf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     hsf.initReader();
     StoreFileReader reader = hsf.getReader();
     reader.loadFileInfo();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
index bde0934..82e1755 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
@@ -59,7 +59,7 @@ public class EncodedSeekPerformanceTest {
     List<Cell> allKeyValues = new ArrayList<>();
 
     // read all of the key values
-    StoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
+    HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
         path, configuration, cacheConf, BloomType.NONE, true);
     storeFile.initReader();
     StoreFileReader reader = storeFile.getReader();
@@ -71,7 +71,7 @@ public class EncodedSeekPerformanceTest {
       allKeyValues.add(current);
     }
 
-    storeFile.closeReader(cacheConf.shouldEvictOnClose());
+    storeFile.closeStoreFile(cacheConf.shouldEvictOnClose());
 
     // pick seeks by random
     List<Cell> seeks = new ArrayList<>();
@@ -89,7 +89,7 @@ public class EncodedSeekPerformanceTest {
   private void runTest(Path path, DataBlockEncoding blockEncoding,
       List<Cell> seeks) throws IOException {
     // read all of the key values
-    StoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
+    HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
       path, configuration, cacheConf, BloomType.NONE, true);
     storeFile.initReader();
     long totalSize = 0;
@@ -132,7 +132,7 @@ public class EncodedSeekPerformanceTest {
     double seeksPerSec = (seeks.size() * NANOSEC_IN_SEC) /
         (finishSeeksTime - startSeeksTime);
 
-    storeFile.closeReader(cacheConf.shouldEvictOnClose());
+    storeFile.closeStoreFile(cacheConf.shouldEvictOnClose());
     clearBlockCache();
 
     System.out.println(blockEncoding);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
new file mode 100644
index 0000000..78b1ef6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalLong;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/** A mock used so our tests don't deal with actual StoreFiles */
+@InterfaceAudience.Private
+public class MockHStoreFile extends HStoreFile {
+  long length = 0;
+  boolean isRef = false;
+  long ageInDisk;
+  long sequenceid;
+  private Map<byte[], byte[]> metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+  byte[] splitPoint = null;
+  TimeRangeTracker timeRangeTracker;
+  long entryCount;
+  boolean isMajor;
+  HDFSBlocksDistribution hdfsBlocksDistribution;
+  long modificationTime;
+  boolean compactedAway;
+
+  MockHStoreFile(HBaseTestingUtility testUtil, Path testPath,
+      long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
+    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
+        new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
+    this.length = length;
+    this.isRef = isRef;
+    this.ageInDisk = ageInDisk;
+    this.sequenceid = sequenceid;
+    this.isMajor = false;
+    hdfsBlocksDistribution = new HDFSBlocksDistribution();
+    hdfsBlocksDistribution.addHostsAndBlockWeight(
+      new String[] { RSRpcServices.getHostname(testUtil.getConfiguration(), false) }, 1);
+    modificationTime = EnvironmentEdgeManager.currentTime();
+  }
+
+  void setLength(long newLen) {
+    this.length = newLen;
+  }
+
+  @Override
+  public long getMaxSequenceId() {
+    return sequenceid;
+  }
+
+  @Override
+  public boolean isMajorCompactionResult() {
+    return isMajor;
+  }
+
+  public void setIsMajor(boolean isMajor) {
+    this.isMajor = isMajor;
+  }
+
+  @Override
+  public boolean isReference() {
+    return this.isRef;
+  }
+
+  @Override
+  public boolean isBulkLoadResult() {
+    return false;
+  }
+
+  @Override
+  public byte[] getMetadataValue(byte[] key) {
+    return this.metadata.get(key);
+  }
+
+  public void setMetadataValue(byte[] key, byte[] value) {
+    this.metadata.put(key, value);
+  }
+
+  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
+    this.timeRangeTracker = timeRangeTracker;
+  }
+
+  void setEntries(long entryCount) {
+    this.entryCount = entryCount;
+  }
+
+  public OptionalLong getMinimumTimestamp() {
+    return timeRangeTracker == null ? OptionalLong.empty()
+        : OptionalLong.of(timeRangeTracker.getMin());
+  }
+
+  public OptionalLong getMaximumTimestamp() {
+    return timeRangeTracker == null ? OptionalLong.empty()
+        : OptionalLong.of(timeRangeTracker.getMax());
+  }
+
+  @Override
+  public void markCompactedAway() {
+    this.compactedAway = true;
+  }
+
+  @Override
+  public boolean isCompactedAway() {
+    return compactedAway;
+  }
+
+  @Override
+  public long getModificationTimeStamp() {
+    return modificationTime;
+  }
+
+  @Override
+  public HDFSBlocksDistribution getHDFSBlockDistribution() {
+    return hdfsBlocksDistribution;
+  }
+
+  @Override
+  public void initReader() throws IOException {
+  }
+
+  @Override
+  public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
+      boolean canOptimizeForNonNullColumn) {
+    return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
+      canOptimizeForNonNullColumn);
+  }
+
+  @Override
+  public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
+      boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
+      throws IOException {
+    return getReader().getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder,
+      canOptimizeForNonNullColumn);
+  }
+
+  @Override
+  public StoreFileReader getReader() {
+    final long len = this.length;
+    final TimeRangeTracker timeRangeTracker = this.timeRangeTracker;
+    final long entries = this.entryCount;
+    return new StoreFileReader() {
+      @Override
+      public long length() {
+        return len;
+      }
+
+      @Override
+      public long getMaxTimestamp() {
+        return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMax();
+      }
+
+      @Override
+      public long getEntries() {
+        return entries;
+      }
+
+      @Override
+      public void close(boolean evictOnClose) throws IOException {
+        // no-op
+      }
+
+      @Override
+      public Optional<Cell> getLastKey() {
+        if (splitPoint != null) {
+          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+              .setType(KeyValue.Type.Put.getCode())
+              .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
+        } else {
+          return Optional.empty();
+        }
+      }
+
+      @Override
+      public Optional<Cell> midKey() throws IOException {
+        if (splitPoint != null) {
+          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+              .setType(KeyValue.Type.Put.getCode()).setRow(splitPoint).build());
+        } else {
+          return Optional.empty();
+        }
+      }
+
+      @Override
+      public Optional<Cell> getFirstKey() {
+        if (splitPoint != null) {
+          return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+              .setType(KeyValue.Type.Put.getCode()).setRow(splitPoint, 0, splitPoint.length - 1)
+              .build());
+        } else {
+          return Optional.empty();
+        }
+      }
+    };
+  }
+
+  @Override
+  public OptionalLong getBulkLoadTimestamp() {
+    // we always return false for isBulkLoadResult so we do not have a bulk load timestamp
+    return OptionalLong.empty();
+  }
+}
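
A short usage sketch for the new mock, assuming the calling test lives in the same org.apache.hadoop.hbase.regionserver package (the constructor and the setEntries/setTimeRangeTracker setters are package-private) and statically imports org.junit.Assert.*; the literal sizes, timestamps and sequence id are illustrative:

    // Fabricate a 1 KB "store file" with sequence id 7 and a known time range,
    // without writing a real HFile; TEST_UTIL is an HBaseTestingUtility and
    // TEST_FILE a scratch Path, as in the compaction-policy tests above.
    MockHStoreFile msf = new MockHStoreFile(TEST_UTIL, TEST_FILE, 1024L, 0L, false, 7L);
    msf.setEntries(100L);
    msf.setTimeRangeTracker(new TimeRangeTracker(1000L, 2000L));

    assertEquals(7L, msf.getMaxSequenceId());
    assertEquals(OptionalLong.of(2000L), msf.getMaximumTimestamp());
    assertFalse(msf.isMajorCompactionResult());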