You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by bu...@apache.org on 2016/09/22 16:34:31 UTC

[43/50] [abbrv] hbase git commit: HBASE-14439 API cleanup

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
index 61c3eda..7530a79 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
@@ -39,7 +39,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable;
 import org.apache.hadoop.hbase.security.User;
@@ -59,7 +60,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
   private final HRegion region_a;
   private final HRegion region_b;
   // merges dir is under region_a
-  private final Path mergesdir;
+  private final StorageIdentifier mergesdir;
   // We only merge adjacent regions if forcible is false
   private final boolean forcible;
   private final long masterSystemTime;
@@ -148,7 +149,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
     }
     this.forcible = forcible;
     this.masterSystemTime = masterSystemTime;
-    this.mergesdir = region_a.getRegionFileSystem().getMergesDir();
+    this.mergesdir = region_a.getRegionStorage().getMergesContainer();
   }
 
   private void transition(RegionMergeTransactionPhase nextPhase) throws IOException {
@@ -383,7 +384,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
 
     transition(RegionMergeTransactionPhase.SET_MERGING);
 
-    this.region_a.getRegionFileSystem().createMergesDir();
+    this.region_a.getRegionStorage().createMergesContainer();
 
     transition(RegionMergeTransactionPhase.CREATED_MERGE_DIR);
 
@@ -557,7 +558,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
       Map<byte[], List<StoreFile>> hstoreFilesOfRegionB)
       throws IOException {
     // Create reference file(s) of region A in mergedir
-    RegionFileSystem fs_a = this.region_a.getRegionFileSystem();
+    RegionStorage fs_a = this.region_a.getRegionStorage();
     for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA.entrySet()) {
       String familyName = Bytes.toString(entry.getKey());
       for (StoreFile storeFile : entry.getValue()) {
@@ -565,7 +566,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
       }
     }
     // Create reference file(s) of region B in mergedir
-    RegionFileSystem fs_b = this.region_b.getRegionFileSystem();
+    RegionStorage fs_b = this.region_b.getRegionStorage();
     for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB.entrySet()) {
       String familyName = Bytes.toString(entry.getKey());
       for (StoreFile storeFile : entry.getValue()) {
@@ -616,7 +617,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
         case CREATED_MERGE_DIR:
           this.region_a.writestate.writesEnabled = true;
           this.region_b.writestate.writesEnabled = true;
-          this.region_a.getRegionFileSystem().cleanupMergesDir();
+          this.region_a.getRegionStorage().cleanupMergesContainer();
           break;
 
         case CLOSED_REGION_A:
@@ -655,7 +656,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
           break;
 
         case STARTED_MERGED_REGION_CREATION:
-          this.region_a.getRegionFileSystem().cleanupMergedRegion(
+          this.region_a.getRegionStorage().cleanupMergedRegion(
               this.mergedRegionInfo);
           break;
 
@@ -688,7 +689,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
   }
 
   @VisibleForTesting
-  Path getMergesDir() {
+  StorageIdentifier getMergesDir() {
     return this.mergesdir;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index e20b3e2..b966d92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -128,7 +128,7 @@ public abstract class RegionSplitPolicy extends Configured {
   }
 
   /**
-   * In {@link HRegionFileSystem#splitStoreFile(org.apache.hadoop.hbase.HRegionInfo, String,
+   * In {@link RegionStorage#splitStoreFile(org.apache.hadoop.hbase.HRegionInfo, String,
    * StoreFile, byte[], boolean, RegionSplitPolicy)} we are not creating the split reference
    * if split row not lies in the StoreFile range. But in some use cases we may need to create
    * the split reference even when the split row not lies in the range. This method can be used
@@ -137,7 +137,7 @@ public abstract class RegionSplitPolicy extends Configured {
    * @param familyName
    * @return whether to skip the StoreFile range check or not
    */
-  protected boolean skipStoreFileRangeCheck(String familyName) {
+  public boolean skipStoreFileRangeCheck(String familyName) {
     return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index e4aee69..d03365f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -317,7 +319,7 @@ public class SplitTransactionImpl implements SplitTransaction {
 
     transition(SplitTransactionPhase.SET_SPLITTING);
 
-    this.parent.getRegionFileSystem().createSplitsDir();
+    this.parent.getRegionStorage().createSplitsContainer();
 
     transition(SplitTransactionPhase.CREATE_SPLIT_DIR);
 
@@ -364,25 +366,29 @@ public class SplitTransactionImpl implements SplitTransaction {
 
     transition(SplitTransactionPhase.STARTED_REGION_A_CREATION);
 
+    // TODO with referenceFileCount in RegionStorage this should clean up
     assertReferenceFileCount(expectedReferences.getFirst(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_a));
+        ((LegacyPathIdentifier)this.parent.getRegionStorage().getSplitsContainer(this.hri_a)).path);
     HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
     assertReferenceFileCount(expectedReferences.getFirst(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName()));
+        new Path(((LegacyPathIdentifier)this.parent.getRegionStorage().getTableContainer()).path,
+            this.hri_a.getEncodedName()));
 
     // Ditto
 
     transition(SplitTransactionPhase.STARTED_REGION_B_CREATION);
 
     assertReferenceFileCount(expectedReferences.getSecond(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_b));
+        ((LegacyPathIdentifier)this.parent.getRegionStorage().getSplitsContainer(this.hri_b)).path);
     HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
     assertReferenceFileCount(expectedReferences.getSecond(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName()));
+        new Path(((LegacyPathIdentifier)this.parent.getRegionStorage().getTableContainer()).path,
+            this.hri_b.getEncodedName()));
 
     return new PairOfSameType<Region>(a, b);
   }
 
+  // TODO file count should be in RegionStorage
   @VisibleForTesting
   void assertReferenceFileCount(int expectedReferenceFileCount, Path dir)
       throws IOException {
@@ -606,7 +612,7 @@ public class SplitTransactionImpl implements SplitTransaction {
     ThreadFactory factory = builder.build();
     ThreadPoolExecutor threadPool =
       (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
-    List<Future<Pair<Path,Path>>> futures = new ArrayList<Future<Pair<Path,Path>>> (nbFiles);
+    List<Future<Pair<StorageIdentifier,StorageIdentifier>>> futures = new ArrayList<> (nbFiles);
 
     // Split each store file.
     for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
@@ -638,9 +644,9 @@ public class SplitTransactionImpl implements SplitTransaction {
     int created_a = 0;
     int created_b = 0;
     // Look for any exception
-    for (Future<Pair<Path, Path>> future : futures) {
+    for (Future<Pair<StorageIdentifier, StorageIdentifier>> future : futures) {
       try {
-        Pair<Path, Path> p = future.get();
+        Pair<StorageIdentifier, StorageIdentifier> p = future.get();
         created_a += p.getFirst() != null ? 1 : 0;
         created_b += p.getSecond() != null ? 1 : 0;
       } catch (InterruptedException e) {
@@ -657,32 +663,32 @@ public class SplitTransactionImpl implements SplitTransaction {
     return new Pair<Integer, Integer>(created_a, created_b);
   }
 
-  private Pair<Path, Path> splitStoreFile(final byte[] family, final StoreFile sf)
+  private Pair<StorageIdentifier, StorageIdentifier> splitStoreFile(final byte[] family, final StoreFile sf)
       throws IOException {
     if (LOG.isDebugEnabled()) {
         LOG.debug("Splitting started for store file: " + sf.getPath() + " for region: " +
                   this.parent);
     }
-    RegionFileSystem fs = this.parent.getRegionFileSystem();
+    RegionStorage fs = this.parent.getRegionStorage();
     String familyName = Bytes.toString(family);
-    Path path_a =
+    StorageIdentifier path_a =
         fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false,
           this.parent.getSplitPolicy());
-    Path path_b =
+    StorageIdentifier path_b =
         fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true,
           this.parent.getSplitPolicy());
     if (LOG.isDebugEnabled()) {
         LOG.debug("Splitting complete for store file: " + sf.getPath() + " for region: " +
                   this.parent);
     }
-    return new Pair<Path,Path>(path_a, path_b);
+    return new Pair<StorageIdentifier, StorageIdentifier>(path_a, path_b);
   }
 
   /**
    * Utility class used to do the file splitting / reference writing
    * in parallel instead of sequentially.
    */
-  private class StoreFileSplitter implements Callable<Pair<Path,Path>> {
+  private class StoreFileSplitter implements Callable<Pair<StorageIdentifier,StorageIdentifier>> {
     private final byte[] family;
     private final StoreFile sf;
 
@@ -696,7 +702,7 @@ public class SplitTransactionImpl implements SplitTransaction {
       this.family = family;
     }
 
-    public Pair<Path,Path> call() throws IOException {
+    public Pair<StorageIdentifier,StorageIdentifier> call() throws IOException {
       return splitStoreFile(family, sf);
     }
   }
@@ -741,7 +747,7 @@ public class SplitTransactionImpl implements SplitTransaction {
 
       case CREATE_SPLIT_DIR:
         this.parent.writestate.writesEnabled = true;
-        this.parent.getRegionFileSystem().cleanupSplitsDir();
+        this.parent.getRegionStorage().cleanupSplitsContainer();
         break;
 
       case CLOSED_PARENT_REGION:
@@ -760,11 +766,11 @@ public class SplitTransactionImpl implements SplitTransaction {
         break;
 
       case STARTED_REGION_A_CREATION:
-        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
+        this.parent.getRegionStorage().cleanupDaughterRegion(this.hri_a);
         break;
 
       case STARTED_REGION_B_CREATION:
-        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
+        this.parent.getRegionStorage().cleanupDaughterRegion(this.hri_b);
         break;
 
       case OFFLINED_PARENT:

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 853a4cf..62f1d58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -157,7 +157,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
 
   FileSystem getFileSystem();
 
-
+  // TODO move createWriterInTmp calls to RegionStorage as something like "createTempStoreFileWriter"
   /**
    * @param maxKeyCount
    * @param compression Compression algorithm to use
@@ -298,7 +298,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    * @param srcPathStr
    * @param sequenceId sequence Id associated with the HFile
    */
-  Path bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
+  StoreFile bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
 
   // General accessors into the state of the store
   // TODO abstract some of this out into a metrics class

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 589d844..071498b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -255,6 +255,7 @@ public class StoreFile {
     return this.fileInfo;
   }
 
+  // TODO replace with StorageIdentifier
   /**
    * @return Path or null if this StoreFile was made with a Stream.
    */
@@ -612,6 +613,7 @@ public class StoreFile {
     return false;
   }
 
+  // TODO move this into RegionStorage
   /**
    * @param fs
    * @param dir Directory to create file in.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index cb5d12c..17abd34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -120,6 +120,7 @@ public class StoreFileWriter implements Compactor.CellSink {
     // it no longer writable.
     this.timeRangeTrackerSet = trt != null;
     this.timeRangeTracker = this.timeRangeTrackerSet? trt: new TimeRangeTracker();
+    // TODO move creation of the writer for the StoreFileWriter into RegionStorage
     writer = HFile.getWriterFactory(conf, cacheConf)
         .withPath(fs, path)
         .withComparator(comparator)
@@ -372,6 +373,7 @@ public class StoreFileWriter implements Compactor.CellSink {
       return this;
     }
 
+    // TODO replace with RegionStorage
     /**
      * Use either this method or {@link #withFilePath}, but not both.
      * @param dir Path to column family directory. The directory is created if
@@ -385,6 +387,7 @@ public class StoreFileWriter implements Compactor.CellSink {
       return this;
     }
 
+    // TODO replace with RegionStorage
     /**
      * Use either this method or {@link #withOutputDir}, but not both.
      * @param filePath the StoreFile path to write

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 4cde73d..81baa82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -47,7 +47,8 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -103,6 +104,8 @@ import org.apache.hadoop.io.IOUtils;
  *    </ul>
  *  <li>restore the logs, if any
  * </ul>
+ *
+ * TODO update for MasterStorage / RegionStorage
  */
 @InterfaceAudience.Private
 public class RestoreSnapshotHelper {
@@ -780,8 +783,8 @@ public class RestoreSnapshotHelper {
 
     List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
     for (FileStatus regionDir: regionDirs) {
-      HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
-      regions.add(hri);
+      final RegionStorage rs = RegionStorage.open(conf, new LegacyPathIdentifier(regionDir.getPath()), false);
+      regions.add(rs.getRegionInfo());
     }
     LOG.debug("found " + regions.size() + " regions for table=" +
         tableDesc.getTableName().getNameAsString());

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index dfb706b..3fa715a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -250,8 +250,7 @@ public final class SnapshotManifest {
     boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
     try {
       // Open the RegionFS
-      Path rootDir = null;
-      RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, rootDir, regionInfo, false);
+      RegionStorage regionFs = RegionStorage.open(conf, regionInfo, false);
       monitor.rethrowException();
 
       // 1. dump region meta info into the snapshot directory

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index 2c5d76a..3ca48fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -36,7 +36,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -62,35 +64,36 @@ public final class SnapshotManifestV1 {
   private SnapshotManifestV1() {
   }
 
-  static class ManifestBuilder implements SnapshotManifest.RegionVisitor<RegionFileSystem, Path> {
+  // TODO update for RegionStorage
+  static class ManifestBuilder implements SnapshotManifest.RegionVisitor<RegionStorage, Path> {
     private final Configuration conf;
-    private final Path snapshotDir;
+    private final StorageIdentifier snapshotDir;
     private final FileSystem fs;
 
     public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
-      this.snapshotDir = snapshotDir;
+      this.snapshotDir = new LegacyPathIdentifier(snapshotDir);
       this.conf = conf;
       this.fs = fs;
     }
 
-    public RegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
-      RegionFileSystem snapshotRegionFs = RegionFileSystem.open(conf, fs,
+    public RegionStorage regionOpen(final HRegionInfo regionInfo) throws IOException {
+      RegionStorage snapshotRegionFs = RegionStorage.open(conf, fs,
           snapshotDir, regionInfo, true);
       return snapshotRegionFs;
     }
 
-    public void regionClose(final RegionFileSystem region) {
+    public void regionClose(final RegionStorage region) {
     }
 
-    public Path familyOpen(final RegionFileSystem snapshotRegionFs, final byte[] familyName) {
-      Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName));
+    public Path familyOpen(final RegionStorage snapshotRegionFs, final byte[] familyName) {
+      Path familyDir = ((LegacyPathIdentifier)snapshotRegionFs.getStoreContainer(Bytes.toString(familyName))).path;
       return familyDir;
     }
 
-    public void familyClose(final RegionFileSystem region, final Path family) {
+    public void familyClose(final RegionStorage region, final Path family) {
     }
 
-    public void storeFile(final RegionFileSystem region, final Path familyDir,
+    public void storeFile(final RegionStorage region, final Path familyDir,
         final StoreFileInfo storeFile) throws IOException {
       Path referenceFile = new Path(familyDir, storeFile.getPath().getName());
       boolean success = true;
@@ -125,8 +128,8 @@ public final class SnapshotManifestV1 {
       completionService.submit(new Callable<SnapshotRegionManifest>() {
         @Override
         public SnapshotRegionManifest call() throws IOException {
-          HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
-          return buildManifestFromDisk(conf, fs, snapshotDir, hri);
+          final RegionStorage rs = RegionStorage.open(conf, new LegacyPathIdentifier(region.getPath()), true);
+          return buildManifestFromDisk(conf, fs, snapshotDir, rs);
         }
       });
     }
@@ -154,13 +157,12 @@ public final class SnapshotManifestV1 {
   }
 
   static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, tableDir, regionInfo, true);
+      final FileSystem fs, final Path tableDir, final RegionStorage regionFs) throws IOException {
     SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
 
     // 1. dump region meta info into the snapshot directory
     LOG.debug("Storing region-info for snapshot.");
-    manifest.setRegionInfo(HRegionInfo.convert(regionInfo));
+    manifest.setRegionInfo(HRegionInfo.convert(regionFs.getRegionInfo()));
 
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
index d38d5c1..89124ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 class FSRegionScanner implements Runnable {
   static private final Log LOG = LogFactory.getLog(FSRegionScanner.class);
 
-  private final RegionFileSystem rfs;
+  private final RegionStorage rfs;
 
   /**
    * Maps region encoded names to maps of hostnames to fractional locality of
@@ -58,7 +58,7 @@ class FSRegionScanner implements Runnable {
   FSRegionScanner(Configuration conf, HRegionInfo hri,
                   Map<String, Map<String, Float>> regionDegreeLocalityMapping)
       throws IOException {
-    this.rfs = RegionFileSystem.open(conf, hri, true);
+    this.rfs = RegionStorage.open(conf, hri, true);
     this.regionDegreeLocalityMapping = regionDegreeLocalityMapping;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 80fbb76..662f20d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -74,8 +74,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -1093,8 +1093,9 @@ public abstract class FSUtils {
       int cfCount = 0;
       int cfFrag = 0;
       for (HRegionInfo hri: mfs.getRegions(table)) {
-        RegionFileSystem rfs = mfs.getRegionFileSystem(hri);
-        for (String family: rfs.getFamilies()) {
+        RegionStorage rfs = mfs.getRegionStorage(hri);
+        final Collection<String> families = rfs.getFamilies();
+        for (String family: families) {
           cfCount++;
           cfCountTotal++;
           if (rfs.getStoreFiles(family).size() > 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 2f34291..8a14cca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -111,9 +111,12 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.LegacyRegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -1022,11 +1025,15 @@ public class HBaseFsck extends Configured implements Closeable {
         Bytes.add(orphanRegionRange.getSecond(), new byte[1]));
     LOG.info("Creating new region : " + hri);
     HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
-    Path target = region.getRegionFileSystem().getRegionDir();
+    final StorageIdentifier target = region.getRegionStorage().getRegionContainer();
 
-    // rename all the data to new region
-    mergeRegionDirs(target, hi);
-    fixes++;
+    if (target instanceof LegacyPathIdentifier) {
+      // rename all the data to new region
+      mergeRegionDirs(((LegacyPathIdentifier)target).path, hi);
+      fixes++;
+    } else {
+      LOG.info("Skipped ");
+    }
   }
 
   /**
@@ -1191,7 +1198,7 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     FileSystem fs = FileSystem.get(getConf());
-    HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    HRegionInfo hri = LegacyRegionStorage.loadRegionInfoFileContent(fs, regionDir);
     LOG.debug("HRegionInfo read: " + hri.toString());
     hbi.hdfsEntry.hri = hri;
   }
@@ -1424,7 +1431,7 @@ public class HBaseFsck extends Configured implements Closeable {
         Collections.<WALActionsListener>singletonList(new MetricsWAL()),
         "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8))).
         getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
-    HRegion meta = HRegion.createHRegion(c, rootdir, metaDescriptor, metaHRI, wal);
+    HRegion meta = HRegion.createHRegion(c, metaDescriptor, metaHRI, wal);
     MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
   }
@@ -2361,7 +2368,8 @@ public class HBaseFsck extends Configured implements Closeable {
                   LOG.warn(hri + " start and stop keys are in the range of " + region
                       + ". The region might not be cleaned up from hdfs when region " + region
                       + " split failed. Hence deleting from hdfs.");
-                  RegionFileSystem.destroy(getConf(), fs, regionDir.getParent(), hri);
+                  // TODO directly reference LegacyRegionStorage?
+                  RegionStorage.destroy(getConf(), fs, new LegacyPathIdentifier(regionDir.getParent()), hri);
                   return;
                 }
               }
@@ -3004,17 +3012,21 @@ public class HBaseFsck extends Configured implements Closeable {
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("[" + thread + "] Created new empty container region: " +
             newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
-        debugLsr(region.getRegionFileSystem().getRegionDir());
+        debugLsr(region.getRegionStorage().getRegionContainer());
 
         // all target regions are closed, should be able to safely cleanup.
         boolean didFix= false;
-        Path target = region.getRegionFileSystem().getRegionDir();
-        for (HbckInfo contained : overlap) {
-          LOG.info("[" + thread + "] Merging " + contained  + " into " + target );
-          int merges = mergeRegionDirs(target, contained);
-          if (merges > 0) {
-            didFix = true;
+        StorageIdentifier target = region.getRegionStorage().getRegionContainer();
+        if (target instanceof LegacyPathIdentifier) {
+          for (HbckInfo contained : overlap) {
+            LOG.info("[" + thread + "] Merging " + contained  + " into " + target );
+            int merges = mergeRegionDirs(((LegacyPathIdentifier)target).path, contained);
+            if (merges > 0) {
+              didFix = true;
+            }
           }
+        } else {
+          LOG.info("skipping merge into " + target + " because it is not a Path.");
         }
         if (didFix) {
           fixes++;
@@ -4146,7 +4158,7 @@ public class HBaseFsck extends Configured implements Closeable {
               try {
                 LOG.debug("Loading region info from hdfs:"+ regionDir.getPath());
 
-                Path regioninfoFile = new Path(he.hdfsRegionDir, LegacyLayout.REGION_INFO_FILE);
+                Path regioninfoFile = new Path(regionDir.getPath(), LegacyLayout.REGION_INFO_FILE);
                 boolean regioninfoFileExists = fs.exists(regioninfoFile);
 
                 if (!regioninfoFileExists) {
@@ -4866,6 +4878,14 @@ public class HBaseFsck extends Configured implements Closeable {
     debugLsr(conf, p, new PrintingErrorReporter());
   }
 
+  void debugLsr(StorageIdentifier id) throws IOException {
+    if (id instanceof LegacyPathIdentifier) {
+      debugLsr(((LegacyPathIdentifier)id).path);
+    } else {
+      LOG.debug("identifier '" + id + "' is not a Path; skipping long output.");
+    }
+  }
+
   /**
    * ls -r for debugging purposes
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index a07fed9..cf5c27c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -190,8 +190,7 @@ public class HBaseFsckRepair {
   public static HRegion createHDFSRegionDir(Configuration conf,
       HRegionInfo hri, HTableDescriptor htd) throws IOException {
     // Create HRegion
-    Path root = FSUtils.getRootDir(conf);
-    HRegion region = HRegion.createHRegion(conf, root, htd, hri, null);
+    HRegion region = HRegion.createHRegion(conf, htd, hri, null);
 
     // Close the new region to flush to disk. Close log file too.
     region.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 1959c73..e4d830e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -144,7 +144,7 @@ class HMerge {
     protected final WALFactory walFactory;
     private final long maxFilesize;
 
-
+    // TODO update for RegionStorage
     protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
     throws IOException {
       this.conf = conf;
@@ -192,14 +192,14 @@ class HMerge {
       long nextSize = 0;
       for (int i = 0; i < info.length - 1; i++) {
         if (currentRegion == null) {
-          currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd,
+          currentRegion = HRegion.openHRegion(info[i], this.htd,
               walFactory.getWAL(info[i].getEncodedNameAsBytes(),
-                info[i].getTable().getNamespace()));
+                info[i].getTable().getNamespace()), conf);
           currentSize = currentRegion.getLargestHStoreSize();
         }
-        nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd,
+        nextRegion = HRegion.openHRegion(info[i + 1], this.htd,
             walFactory.getWAL(info[i + 1].getEncodedNameAsBytes(),
-              info[i + 1].getTable().getNamespace()));
+              info[i + 1].getTable().getNamespace()), conf);
         nextSize = nextRegion.getLargestHStoreSize();
 
         if ((currentSize + nextSize) <= (maxFilesize / 2)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 4fe39ff..b670a26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -170,7 +170,7 @@ public abstract class ModifyRegionUtils {
     // unless I pass along via the conf.
     Configuration confForWAL = new Configuration(conf);
     confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
-    HRegion region = HRegion.createHRegion(conf, rootDir, hTableDescriptor, newRegion, null, false);
+    HRegion region = HRegion.createHRegion(conf, rootDir, hTableDescriptor, newRegion);
     try {
       // 2. Custom user code to interact with the created region
       if (task != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index cbc57dc..3793cc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -727,8 +727,7 @@ public class RegionSplitter {
             if (sk.length == 0)
               sk = splitAlgo.firstRow();
 
-            RegionFileSystem regionFs = RegionFileSystem.open(
-              connection.getConfiguration(), fs, tableDir, hri, true);
+            RegionStorage regionFs = RegionStorage.open(connection.getConfiguration(), hri, true);
 
             // Check every Column Family for that region -- check does not have references.
             boolean refFound = false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index 6c2c882..370f03b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.protobuf.ServerProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -94,7 +94,7 @@ public class TestIOFencing {
     volatile CountDownLatch compactionsBlocked = new CountDownLatch(0);
     volatile CountDownLatch compactionsWaiting = new CountDownLatch(0);
 
-    public CompactionBlockerRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public CompactionBlockerRegion(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }
@@ -153,7 +153,7 @@ public class TestIOFencing {
    */
   public static class BlockCompactionsInPrepRegion extends CompactionBlockerRegion {
 
-    public BlockCompactionsInPrepRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public BlockCompactionsInPrepRegion(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }
@@ -176,7 +176,7 @@ public class TestIOFencing {
    * entry to go the WAL before blocking, but blocks afterwards
    */
   public static class BlockCompactionsInCompletionRegion extends CompactionBlockerRegion {
-    public BlockCompactionsInCompletionRegion(final RegionFileSystem rfs,
+    public BlockCompactionsInCompletionRegion(final RegionStorage rfs,
         final HTableDescriptor htd, final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }
@@ -281,7 +281,7 @@ public class TestIOFencing {
       // those entries
       HRegionInfo oldHri = new HRegionInfo(table.getName(),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-      CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(oldHri,
+      CompactionDescriptor compactionDescriptor = ServerProtobufUtil.toCompactionDescriptor(oldHri,
         FAMILY, Lists.newArrayList(new Path("/a")), Lists.newArrayList(new Path("/b")),
         new Path("store_dir"));
       WALUtil.writeCompactionMarker(compactingRegion.getWAL(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e30d719..a371000 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -136,7 +136,7 @@ public class TestHFileArchiving {
     FileSystem fs = UTIL.getTestFileSystem();
 
     // now attempt to depose the region
-    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
+    Path rootDir = region.getRegionStorage().getTableDir().getParent();
     Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
 
     HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
@@ -185,7 +185,7 @@ public class TestHFileArchiving {
     assertEquals(1, servingRegions.size());
     HRegion region = servingRegions.get(0);
 
-    FileSystem fs = region.getRegionFileSystem().getFileSystem();
+    FileSystem fs = region.getRegionStorage().getFileSystem();
 
     // make sure there are some files in the regiondir
     Path rootDir = FSUtils.getRootDir(fs.getConf());

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index a5b0002..f5f380f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index fa4497f..bcc15cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -221,7 +221,7 @@ public class TestRegionObserverScannerOpenHook {
   public static class CompactionCompletionNotifyingRegion extends HRegion {
     private static volatile CountDownLatch compactionStateChangeLatch = null;
 
-    public CompactionCompletionNotifyingRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public CompactionCompletionNotifyingRegion(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 89c8dfc..314d6b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index b7e980b..3ea193b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 38e0a61..acd1d72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -661,7 +661,7 @@ public class TestAtomicOperation {
 
   public static class MockHRegion extends HRegion {
 
-    public MockHRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public MockHRegion(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index fa630a2..86c3968 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -174,7 +174,7 @@ public class TestCompaction {
       assertEquals(compactionThreshold, s.getStorefilesCount());
       assertTrue(s.getStorefilesSize() > 15*1000);
       // and no new store files persisted past compactStores()
-      FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
+      FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionStorage().getTempDir());
       assertEquals(0, ls.length);
 
     } finally {
@@ -249,11 +249,11 @@ public class TestCompaction {
     // Now lets corrupt the compacted file.
     FileSystem fs = store.getFileSystem();
     // default compaction policy created one and only one new compacted file
-    Path dstPath = store.getRegionFileSystem().createTempName();
+    Path dstPath = store.getRegionStorage().createTempName();
     FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
     stream.writeChars("CORRUPT FILE!!!!");
     stream.close();
-    Path origPath = store.getRegionFileSystem().commitStoreFile(
+    Path origPath = store.getRegionStorage().commitStoreFile(
       Bytes.toString(COLUMN_FAMILY), dstPath);
 
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
index 24b3667..4e39664 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
@@ -111,7 +111,7 @@ public class TestCompactionPolicy {
 
     store = new HStore(region, hcd, conf);
 
-    TEST_FILE = region.getRegionFileSystem().createTempName();
+    TEST_FILE = region.getRegionStorage().createTempName();
     fs.createNewFile(TEST_FILE);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
index 1325c85..140b038 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.fs.FsContext;
-import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.testclassification.MasterTests;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index ef332a4..95e19f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -91,13 +91,13 @@ public class TestDefaultCompactSelection extends TestCase {
     region = HBaseTestingUtility.createRegionAndWAL(info, basedir, conf, htd);
     HBaseTestingUtility.closeRegionAndWAL(region);
 
-    RegionFileSystem rfs = RegionFileSystem.open(conf, fs, basedir, info, false);
+    RegionStorage rfs = RegionStorage.open(conf, fs, basedir, info, false);
     region = new HRegion(rfs, htd,
       wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()), null);
 
     store = new HStore(region, hcd, conf);
 
-    TEST_FILE = region.getRegionFileSystem().createTempName();
+    TEST_FILE = region.getRegionStorage().createTempName();
     fs.createNewFile(TEST_FILE);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index a324328..73e10f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.crypto.aes.AES;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -159,7 +159,7 @@ public class TestHMobStore {
     FSUtils.setRootDir(walConf, basedir);
     final WALFactory wals = new WALFactory(walConf, null, methodName);
 
-    RegionFileSystem rfs = RegionFileSystem.open(conf, fs, basedir, info, false);
+    RegionStorage rfs = RegionStorage.open(conf, fs, basedir, info, false);
     region = new HRegion(rfs, htd,
       wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()), null);
     store = new HMobStore(region, hcd, conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 783eef5..ab19ce3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -90,7 +90,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
@@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ServerProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
@@ -684,8 +685,8 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
     final WALFactory wals = new WALFactory(CONF, null, method);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
@@ -737,8 +738,8 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
     final WALFactory wals = new WALFactory(CONF, null, method);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
@@ -792,8 +793,8 @@ public class TestHRegion {
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, CONF, family);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
 
       Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
       for (int i = 1000; i < 1050; i += 10) {
@@ -827,8 +828,8 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
     final WALFactory wals = new WALFactory(CONF, null, method);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
       byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
 
@@ -896,8 +897,8 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
     final WALFactory wals = new WALFactory(CONF, null, method);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       long maxSeqId = 3;
@@ -925,14 +926,14 @@ public class TestHRegion {
       assertEquals(3, region.getStore(family).getStorefilesCount());
 
       // now find the compacted file, and manually add it to the recovered edits
-      Path tmpDir = region.getRegionFileSystem().getTempDir();
+      Path tmpDir = region.getRegionStorage().getTempDir();
       FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
       String errorMsg = "Expected to find 1 file in the region temp directory "
           + "from the compaction, could not find any";
       assertNotNull(errorMsg, files);
       assertEquals(errorMsg, 1, files.length);
       // move the file inside region dir
-      Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family),
+      Path newFile = region.getRegionStorage().commitStoreFile(Bytes.toString(family),
           files[0].getPath());
 
       byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
@@ -942,10 +943,10 @@ public class TestHRegion {
         fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
       }
 
-      CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region
+      CompactionDescriptor compactionDescriptor = ServerProtobufUtil.toCompactionDescriptor(this.region
         .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family,
             storeFiles, Lists.newArrayList(newFile),
-            region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));
+            region.getRegionStorage().getStoreDir(Bytes.toString(family)));
 
       WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(),
           this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
@@ -1012,8 +1013,8 @@ public class TestHRegion {
     this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
       HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
     try {
-      Path regiondir = region.getRegionFileSystem().getRegionDir();
-      FileSystem fs = region.getRegionFileSystem().getFileSystem();
+      Path regiondir = region.getRegionStorage().getRegionDir();
+      FileSystem fs = region.getRegionStorage().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       long maxSeqId = 3;
@@ -2639,9 +2640,9 @@ public class TestHRegion {
           HRegion.openHRegion(subregions[i], null);
           subregions[i].compactStores();
         }
-        Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
-        Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
-        Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
+        Path oldRegionPath = region.getRegionStorage().getRegionDir();
+        Path oldRegion1 = subregions[0].getRegionStorage().getRegionDir();
+        Path oldRegion2 = subregions[1].getRegionStorage().getRegionDir();
         long startTime = System.currentTimeMillis();
         region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
         LOG.info("Merge regions elapsed time: "
@@ -4498,8 +4499,8 @@ public class TestHRegion {
 
     // Create a region and skip the initialization (like CreateTableHandler)
     HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootDir, CONF, htd, false);
-    Path regionDir = region.getRegionFileSystem().getRegionDir();
-    FileSystem fs = region.getRegionFileSystem().getFileSystem();
+    Path regionDir = region.getRegionStorage().getRegionDir();
+    FileSystem fs = region.getRegionStorage().getFileSystem();
     HBaseTestingUtility.closeRegionAndWAL(region);
 
     Path regionInfoFile = LegacyLayout.getRegionInfoFile(regionDir);
@@ -4510,7 +4511,7 @@ public class TestHRegion {
 
     // Try to open the region
     region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
-    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
+    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
     HBaseTestingUtility.closeRegionAndWAL(region);
 
     // Verify that the .regioninfo file is still there
@@ -4524,7 +4525,7 @@ public class TestHRegion {
 
     region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
 //    region = TEST_UTIL.openHRegion(hri, htd);
-    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
+    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
     HBaseTestingUtility.closeRegionAndWAL(region);
 
     // Verify that the .regioninfo file is still there
@@ -5030,8 +5031,8 @@ public class TestHRegion {
 
       // move the file of the primary region to the archive, simulating a compaction
       Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
-      primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
-      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
+      primaryRegion.getRegionStorage().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
+      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionStorage()
           .getStoreFiles(families[0]);
       Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
 
@@ -6090,7 +6091,7 @@ public class TestHRegion {
 
   // Helper for test testOpenRegionWrittenToWALForLogReplay
   static class HRegionWithSeqId extends HRegion {
-    public HRegionWithSeqId(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public HRegionWithSeqId(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index 48609b9..808029c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.FSUtilsWithRetries;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -50,9 +50,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({RegionServerTests.class, SmallTests.class})
-public class TestHRegionFileSystem {
+public class TestHRegionStorage {
   private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final Log LOG = LogFactory.getLog(TestHRegionFileSystem.class);
+  private static final Log LOG = LogFactory.getLog(TestHRegionStorage.class);
 
   @Test
   public void testOnDiskRegionCreation() throws IOException {
@@ -62,22 +62,22 @@ public class TestHRegionFileSystem {
 
     // Create a Region
     HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
-    RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, rootDir, hri, true);
+    RegionStorage regionFs = RegionStorage.open(conf, fs, rootDir, hri, true);
 
     // Verify if the region is on disk
     Path regionDir = regionFs.getRegionDir();
     assertTrue("The region folder should be created", fs.exists(regionDir));
 
     // Verify the .regioninfo
-    HRegionInfo hriVerify = RegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    HRegionInfo hriVerify = RegionStorage.open(conf, regionDir, false).getRegionInfo();
     assertEquals(hri, hriVerify);
 
     // Open the region
-    regionFs = RegionFileSystem.open(conf, fs, rootDir, hri, false);
+    regionFs = RegionStorage.open(conf, fs, rootDir, hri, false);
     assertEquals(regionDir, regionFs.getRegionDir());
 
     // Delete the region
-    RegionFileSystem.destroy(conf, fs, rootDir, hri);
+    RegionStorage.destroy(conf, fs, rootDir, hri);
     assertFalse("The region folder should be removed", fs.exists(regionDir));
 
     fs.delete(rootDir, true);
@@ -207,7 +207,7 @@ public class TestHRegionFileSystem {
     // Create a Region
     String familyName = "cf";
     HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
-    RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, rootDir, hri, true);
+    RegionStorage regionFs = RegionStorage.open(conf, fs, rootDir, hri, true);
 
     // New region, no store files
     Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 7e0a6f6..a790116 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@@ -79,15 +79,15 @@ public class TestHRegionInfo {
     long modtime2 = getModTime(r);
     assertEquals(modtime, modtime2);
     // Now load the file.
-    HRegionInfo deserializedHri = RegionFileSystem.loadRegionInfoFileContent(
-        r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
+    HRegionInfo deserializedHri = RegionStorage.open(r.getRegionStorage().getConfiguration(),
+        r.getRegionStorage().getRegionContainer(), false).getRegionInfo();
     assertTrue(hri.equals(deserializedHri));
     HBaseTestingUtility.closeRegionAndWAL(r);
   }
 
   long getModTime(final HRegion r) throws IOException {
-    FileStatus[] statuses = r.getRegionFileSystem().getFileSystem().listStatus(
-      LegacyLayout.getRegionInfoFile(r.getRegionFileSystem().getRegionDir()));
+    FileStatus[] statuses = r.getRegionStorage().getFileSystem().listStatus(
+      LegacyLayout.getRegionInfoFile(r.getRegionStorage().getRegionDir()));
     assertTrue(statuses != null && statuses.length == 1);
     return statuses[0].getModificationTime();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index 014aba7..8400883 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -101,7 +101,7 @@ public class TestRecoveredEdits {
     Path hbaseRootDir = TEST_UTIL.getDataTestDir();
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
-    RegionFileSystem hrfs = RegionFileSystem.open(TEST_UTIL.getConfiguration(), fs, hbaseRootDir, hri, false);
+    RegionStorage hrfs = RegionStorage.open(TEST_UTIL.getConfiguration(), fs, hbaseRootDir, hri, false);
     if (fs.exists(hrfs.getRegionDir())) {
       LOG.info("Region directory already exists. Deleting.");
       fs.delete(hrfs.getRegionDir(), true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index 4630522..535d449 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -96,10 +97,10 @@ public class TestRegionMergeTransaction {
   public void teardown() throws IOException {
     for (HRegion region : new HRegion[] { region_a, region_b, region_c }) {
       if (region != null && !region.isClosed()) region.close();
-      if (this.fs.exists(region.getRegionFileSystem().getRegionDir())
-          && !this.fs.delete(region.getRegionFileSystem().getRegionDir(), true)) {
+      if (this.fs.exists(region.getRegionStorage().getRegionDir())
+          && !this.fs.delete(region.getRegionStorage().getRegionDir(), true)) {
         throw new IOException("Failed deleting of "
-            + region.getRegionFileSystem().getRegionDir());
+            + region.getRegionStorage().getRegionDir());
       }
     }
     if (this.wals != null) {
@@ -257,14 +258,16 @@ public class TestRegionMergeTransaction {
     Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
     HRegion mergedRegion = (HRegion)mt.execute(mockServer, null);
     // Do some assertions about execution.
-    assertTrue(this.fs.exists(mt.getMergesDir()));
+    // TODO move tests to rely on RegionStorage
+    final Path mergesdir = ((LegacyPathIdentifier)mt.getMergesDir()).path;
+    assertTrue(this.fs.exists(mergesdir));
     // Assert region_a and region_b is closed.
     assertTrue(region_a.isClosed());
     assertTrue(region_b.isClosed());
 
     // Assert mergedir is empty -- because its content will have been moved out
     // to be under the merged region dirs.
-    assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length);
+    assertEquals(0, this.fs.listStatus(mergesdir).length);
     // Check merged region have correct key span.
     assertTrue(Bytes.equals(this.region_a.getRegionInfo().getStartKey(),
         mergedRegion.getRegionInfo().getStartKey()));
@@ -376,7 +379,7 @@ public class TestRegionMergeTransaction {
     // Make sure that merged region is still in the filesystem, that
     // they have not been removed; this is supposed to be the case if we go
     // past point of no return.
-    Path tableDir = this.region_a.getRegionFileSystem().getRegionDir()
+    Path tableDir = this.region_a.getRegionStorage().getRegionDir()
         .getParent();
     Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo()
         .getEncodedName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 326bbef..ee3d7d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -251,7 +251,7 @@ public class TestRegionMergeTransactionOnCluster {
       admin.compactRegion(mergedRegionInfo.getRegionName());
       // wait until merged region doesn't have reference file
       long timeout = System.currentTimeMillis() + waitTime;
-      RegionFileSystem hrfs = RegionFileSystem.open(
+      RegionStorage hrfs = RegionStorage.open(
           TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo, false);
       while (System.currentTimeMillis() < timeout) {
         for(HColumnDescriptor colFamily : columnFamilies) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
index 52e8724..14903ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
-import org.apache.hadoop.hbase.fs.RegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
@@ -445,7 +445,7 @@ public class TestScannerHeartbeatMessages {
     private static volatile int columnFamilySleepTime = DEFAULT_CF_SLEEP_TIME;
     private static volatile boolean sleepBetweenColumnFamilies = false;
 
-    public HeartbeatHRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+    public HeartbeatHRegion(final RegionStorage rfs, final HTableDescriptor htd,
         final WAL wal, final RegionServerServices rsServices) {
       super(rfs, htd, wal, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2986c971/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 2ade27a..0ae0f86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -100,7 +100,7 @@ public class TestSplitTransaction {
 
   @After public void teardown() throws IOException {
     if (this.parent != null && !this.parent.isClosed()) this.parent.close();
-    Path regionDir = this.parent.getRegionFileSystem().getRegionDir();
+    Path regionDir = this.parent.getRegionStorage().getRegionDir();
     if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
       throw new IOException("Failed delete of " + regionDir);
     }
@@ -143,7 +143,7 @@ public class TestSplitTransaction {
     // Make sure that region a and region b are still in the filesystem, that
     // they have not been removed; this is supposed to be the case if we go
     // past point of no return.
-    Path tableDir =  this.parent.getRegionFileSystem().getTableDir();
+    Path tableDir =  this.parent.getRegionStorage().getTableDir();
     Path daughterADir = new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
     Path daughterBDir = new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
     assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
@@ -245,13 +245,13 @@ public class TestSplitTransaction {
     when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
     PairOfSameType<Region> daughters = st.execute(mockServer, null);
     // Do some assertions about execution.
-    assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
+    assertTrue(this.fs.exists(this.parent.getRegionStorage().getSplitsDir()));
     // Assert the parent region is closed.
     assertTrue(this.parent.isClosed());
 
     // Assert splitdir is empty -- because its content will have been moved out
     // to be under the daughter region dirs.
-    assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
+    assertEquals(0, this.fs.listStatus(this.parent.getRegionStorage().getSplitsDir()).length);
     // Check daughters have correct key span.
     assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(),
       daughters.getFirst().getRegionInfo().getStartKey()));
@@ -288,7 +288,7 @@ public class TestSplitTransaction {
     SplitTransactionImpl spiedUponSt = spy(st);
     doThrow(new IOException("Failing split. Expected reference file count isn't equal."))
         .when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(new Path(this.parent.getRegionFileSystem().getTableDir(),
+        eq(new Path(this.parent.getRegionStorage().getTableDir(),
             st.getSecondDaughter().getEncodedName())));
 
     // Run the execute.  Look at what it returns.
@@ -315,7 +315,7 @@ public class TestSplitTransaction {
     SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
     SplitTransactionImpl spiedUponSt = spy(st);
     doNothing().when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(parent.getRegionFileSystem().getSplitsDir(st.getFirstDaughter())));
+        eq(parent.getRegionStorage().getSplitsDir(st.getFirstDaughter())));
     when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).
         thenThrow(new MockedFailedDaughterCreation());
     // Run the execute.  Look at what it returns.