Posted to commits@hbase.apache.org by bu...@apache.org on 2016/09/29 16:08:30 UTC

[09/18] hbase git commit: HBASE-14439 Moved from MasterFileSystem to MasterStorage abstract class for storage abstraction layer.

HBASE-14439 Moved from MasterFileSystem to MasterStorage abstract class for storage abstraction layer.

Code compiles with 'mvn -DskipTests clean compile'.
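
For reviewers, a minimal before/after sketch of the rename at a call site (class and
method names as they appear in the diff below; the surrounding setup is illustrative
only, not code from this commit):

    // Before: path-centric API
    MasterFileSystem mfs = MasterFileSystem.open(conf, true);
    Path root = mfs.getRootDir();

    // After: the root is an opaque StorageIdentifier instead of a Path
    MasterStorage<?> ms = MasterStorage.open(conf, true);
    StorageIdentifier root = ms.getRootContainer();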


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9f759626
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9f759626
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9f759626

Branch: refs/heads/hbase-14439
Commit: 9f7596269c40e09d113e807e91b1a6235c4f0410
Parents: 9fddf3c
Author: Umesh Agashe <ua...@cloudera.com>
Authored: Fri Sep 23 16:40:39 2016 -0700
Committer: Sean Busbey <bu...@apache.org>
Committed: Thu Sep 29 11:07:29 2016 -0500

----------------------------------------------------------------------
 .../SplitLogManagerCoordination.java            |   4 +-
 .../org/apache/hadoop/hbase/fs/FsContext.java   |  28 --
 .../hadoop/hbase/fs/MasterFileSystem.java       | 330 -------------------
 .../apache/hadoop/hbase/fs/MasterStorage.java   | 271 +++++++++++++++
 .../apache/hadoop/hbase/fs/RegionStorage.java   |  63 +---
 .../apache/hadoop/hbase/fs/StorageContext.java  |  28 ++
 .../hbase/fs/legacy/HFileArchiveUtil.java       |   4 +-
 .../hbase/fs/legacy/LegacyMasterFileSystem.java | 164 ++++++---
 .../hadoop/hbase/master/CatalogJanitor.java     |  10 +-
 .../hbase/master/ClusterStatusPublisher.java    |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  29 +-
 .../hadoop/hbase/master/MasterServices.java     |   6 +-
 .../hadoop/hbase/master/MasterWalManager.java   |  22 +-
 .../hadoop/hbase/master/RegionStates.java       |   6 +-
 .../procedure/CloneSnapshotProcedure.java       |  31 +-
 .../procedure/CreateNamespaceProcedure.java     |   3 +-
 .../master/procedure/CreateTableProcedure.java  |  20 +-
 .../procedure/DeleteNamespaceProcedure.java     |   8 +-
 .../master/procedure/DeleteTableProcedure.java  |  25 +-
 .../procedure/MasterDDLOperationHelper.java     |   8 +-
 .../procedure/RestoreSnapshotProcedure.java     |  15 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   2 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  43 ++-
 .../master/snapshot/TakeSnapshotHandler.java    |   5 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |  28 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  27 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  12 +-
 .../TestHColumnDescriptorDefaultVersions.java   |   5 +-
 .../org/apache/hadoop/hbase/TestNamespace.java  |   2 +-
 .../client/TestRestoreSnapshotFromClient.java   |   6 +-
 .../client/TestSnapshotCloneIndependence.java   |   5 +-
 .../hbase/client/TestSnapshotFromClient.java    |  12 +-
 .../hbase/client/TestSnapshotMetadata.java      |   4 +-
 .../hbase/client/TestTableSnapshotScanner.java  |   2 +-
 .../hadoop/hbase/fs/TestBlockReorder.java       |   2 +-
 .../TableSnapshotInputFormatTestBase.java       |   5 +-
 .../hadoop/hbase/mapreduce/TestWALPlayer.java   |   2 +-
 .../hbase/master/MockNoopMasterServices.java    |   2 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  22 +-
 .../master/TestDistributedLogSplitting.java     |  11 +-
 .../hbase/master/TestMasterFileSystem.java      |   5 +-
 .../master/cleaner/TestSnapshotFromMaster.java  |   2 +-
 .../MasterProcedureTestingUtility.java          |  10 +-
 .../TestMasterFailoverWithProcedures.java       |   4 +-
 .../procedure/TestMasterProcedureWalLease.java  |   6 +-
 ...stTableDescriptorModificationFromClient.java |   5 +-
 .../master/snapshot/TestSnapshotManager.java    |   6 +-
 .../hbase/namespace/TestNamespaceAuditor.java   |   5 +-
 .../regionserver/TestCompactSplitThread.java    |   2 +-
 .../TestCorruptedRegionStoreFile.java           |  13 +-
 .../TestRegionMergeTransactionOnCluster.java    |   4 +-
 .../TestScannerRetriableFailure.java            |   4 +-
 .../TestSplitTransactionOnCluster.java          |  15 +-
 .../hbase/snapshot/SnapshotTestingUtils.java    |   8 +-
 .../hbase/snapshot/TestExportSnapshot.java      |   4 +-
 .../snapshot/TestFlushSnapshotFromClient.java   |  20 +-
 .../TestRestoreFlushSnapshotFromClient.java     |   2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   2 +-
 58 files changed, 666 insertions(+), 725 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 3afce96..3561330 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -42,11 +42,11 @@ import com.google.common.annotations.VisibleForTesting;
  * <P>
  * Methods required for task life cycle: <BR>
  * {@link #markRegionsRecovering(ServerName, Set)} mark regions for log replaying. Used by
- * {@link org.apache.hadoop.hbase.fs.MasterFileSystem} <BR>
+ * {@link org.apache.hadoop.hbase.fs.MasterStorage} <BR>
  * {@link #removeRecoveringRegions(Set, Boolean)} cleans up regions that were previously marked as
  * recovering. Called after all tasks have been processed <BR>
  * {@link #removeStaleRecoveringRegions(Set)} remove stale recovering. called by
- * {@link org.apache.hadoop.hbase.fs.MasterFileSystem} after Active Master is initialized <BR>
+ * {@link org.apache.hadoop.hbase.fs.MasterStorage} after Active Master is initialized <BR>
  * {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
  * recovery has been made<BR>
  * {@link #checkTaskStillAvailable(String)} Check that task is still there <BR>

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/FsContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/FsContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/FsContext.java
deleted file mode 100644
index ced095d..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/FsContext.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.fs;
-
-public enum FsContext {
-  TEMP,
-  DATA,
-  ARCHIVE,
-  SNAPSHOT,
-  SIDELINE,
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterFileSystem.java
deleted file mode 100644
index 3bc89d4..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterFileSystem.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.fs;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.legacy.LegacyMasterFileSystem;
-import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobUtils;
-
-@InterfaceAudience.Private
-public abstract class MasterFileSystem {
-  private static Log LOG = LogFactory.getLog(MasterFileSystem.class);
-
-  // Persisted unique cluster ID
-  private ClusterId clusterId;
-
-
-  private Configuration conf;
-  private FileSystem fs;
-  private Path rootDir;
-
-  protected MasterFileSystem(Configuration conf, FileSystem fs, Path rootDir) {
-    this.rootDir = rootDir;
-    this.conf = conf;
-    this.fs = fs;
-  }
-
-  public Configuration getConfiguration() { return conf; }
-  public FileSystem getFileSystem() { return fs; }
-  public Path getRootDir() { return rootDir; }
-
-  // ==========================================================================
-  //  PUBLIC Interfaces - Visitors
-  // ==========================================================================
-  public interface NamespaceVisitor {
-    void visitNamespace(String namespace) throws IOException;
-  }
-
-  public interface TableVisitor {
-    void visitTable(TableName tableName) throws IOException;
-  }
-
-  public interface RegionVisitor {
-    void visitRegion(HRegionInfo regionInfo) throws IOException;
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - Namespace related
-  // ==========================================================================
-  public abstract void createNamespace(NamespaceDescriptor nsDescriptor) throws IOException;
-  public abstract void deleteNamespace(String namespaceName) throws IOException;
-  public abstract Collection<String> getNamespaces(FsContext ctx) throws IOException;
-
-  public Collection<String> getNamespaces() throws IOException {
-    return getNamespaces(FsContext.DATA);
-  }
-  // should return or get a NamespaceDescriptor? how is that different from HTD?
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table Descriptor related
-  // ==========================================================================
-  public HTableDescriptor getTableDescriptor(TableName tableName)
-      throws IOException {
-    return getTableDescriptor(FsContext.DATA, tableName);
-  }
-
-  public boolean createTableDescriptor(HTableDescriptor tableDesc, boolean force)
-      throws IOException {
-    return createTableDescriptor(FsContext.DATA, tableDesc, force);
-  }
-
-  public void updateTableDescriptor(HTableDescriptor tableDesc) throws IOException {
-    updateTableDescriptor(FsContext.DATA, tableDesc);
-  }
-
-  public abstract HTableDescriptor getTableDescriptor(FsContext ctx, TableName tableName)
-      throws IOException;
-  public abstract boolean createTableDescriptor(FsContext ctx, HTableDescriptor tableDesc,
-      boolean force) throws IOException;
-  public abstract void updateTableDescriptor(FsContext ctx, HTableDescriptor tableDesc)
-      throws IOException;
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table related
-  // ==========================================================================
-  public void deleteTable(TableName tableName) throws IOException {
-    deleteTable(FsContext.DATA, tableName);
-  }
-
-  public Collection<TableName> getTables(String namespace) throws IOException {
-    return getTables(FsContext.DATA, namespace);
-  }
-
-  public abstract void deleteTable(FsContext ctx, TableName tableName) throws IOException;
-
-  public abstract Collection<TableName> getTables(FsContext ctx, String namespace)
-    throws IOException;
-
-  public Collection<TableName> getTables() throws IOException {
-    ArrayList<TableName> tables = new ArrayList<TableName>();
-    for (String ns: getNamespaces()) {
-      tables.addAll(getTables(ns));
-    }
-    return tables;
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table Region related
-  // ==========================================================================
-  public void deleteRegion(HRegionInfo regionInfo) throws IOException {
-    RegionStorage.destroy(conf, regionInfo);
-  }
-
-  public Collection<HRegionInfo> getRegions(TableName tableName) throws IOException {
-    return getRegions(FsContext.DATA, tableName);
-  }
-
-  public abstract Collection<HRegionInfo> getRegions(FsContext ctx, TableName tableName)
-    throws IOException;
-
-  // TODO: Move in HRegionStorage
-  public void deleteFamilyFromFS(HRegionInfo regionInfo, byte[] familyName, boolean hasMob)
-      throws IOException {
-    getRegionStorage(regionInfo).deleteFamily(Bytes.toString(familyName), hasMob);
-  }
-
-  public RegionStorage getRegionStorage(HRegionInfo regionInfo) throws IOException {
-    return RegionStorage.open(conf, regionInfo, false);
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - visitors
-  // ==========================================================================
-  public void visitStoreFiles(StoreFileVisitor visitor)
-      throws IOException {
-    visitStoreFiles(FsContext.DATA, visitor);
-  }
-
-  public void visitStoreFiles(String namespace, StoreFileVisitor visitor)
-      throws IOException {
-    visitStoreFiles(FsContext.DATA, namespace, visitor);
-  }
-
-  public void visitStoreFiles(TableName table, StoreFileVisitor visitor)
-      throws IOException {
-    visitStoreFiles(FsContext.DATA, table, visitor);
-  }
-
-  public void visitStoreFiles(FsContext ctx, StoreFileVisitor visitor)
-      throws IOException {
-    for (String namespace: getNamespaces()) {
-      visitStoreFiles(ctx, namespace, visitor);
-    }
-  }
-
-  public void visitStoreFiles(FsContext ctx, String namespace, StoreFileVisitor visitor)
-      throws IOException {
-    for (TableName tableName: getTables(namespace)) {
-      visitStoreFiles(ctx, tableName, visitor);
-    }
-  }
-
-  public void visitStoreFiles(FsContext ctx, TableName table, StoreFileVisitor visitor)
-      throws IOException {
-    for (HRegionInfo hri: getRegions(ctx, table)) {
-      RegionStorage.open(conf, hri, false).visitStoreFiles(visitor);
-    }
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - bootstrap
-  // ==========================================================================
-  public abstract Path getTempDir();
-
-  public void logFileSystemState(Log log) throws IOException {
-    FSUtils.logFileSystemState(getFileSystem(), getRootDir(), LOG);
-  }
-
-  /**
-   * @return The unique identifier generated for this cluster
-   */
-  public ClusterId getClusterId() {
-    return clusterId;
-  }
-
-  protected void bootstrap() throws IOException {
-    // check if the root directory exists
-    createInitialLayout(getRootDir(), conf, this.fs);
-
-    // check if temp directory exists and clean it
-    startupCleanup();
-  }
-
-  protected abstract void bootstrapMeta() throws IOException;
-  protected abstract void startupCleanup() throws IOException;
-
-  /**
-   * Create initial layout in filesystem.
-   * <ol>
-   * <li>Check if the meta region exists and is readable, if not create it.
-   * Create hbase.version and the hbase:meta directory if not one.
-   * </li>
-   * <li>Create a log archive directory for RS to put archived logs</li>
-   * </ol>
-   * Idempotent.
-   */
-  private void createInitialLayout(final Path rd, final Configuration c, final FileSystem fs)
-      throws IOException {
-    // If FS is in safe mode wait till out of it.
-    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
-
-    // Filesystem is good. Go ahead and check for hbase.rootdir.
-    try {
-      if (!fs.exists(rd)) {
-        fs.mkdirs(rd);
-        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
-        // We used to handle this by checking the current DN count and waiting until
-        // it is nonzero. With security, the check for datanode count doesn't work --
-        // it is a privileged op. So instead we adopt the strategy of the jobtracker
-        // and simply retry file creation during bootstrap indefinitely. As soon as
-        // there is one datanode it will succeed. Permission problems should have
-        // already been caught by mkdirs above.
-        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
-          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-      } else {
-        if (!fs.isDirectory(rd)) {
-          throw new IllegalArgumentException(rd.toString() + " is not a directory");
-        }
-        // as above
-        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
-          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-      }
-    } catch (DeserializationException de) {
-      LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
-      IOException ioe = new IOException();
-      ioe.initCause(de);
-      throw ioe;
-    } catch (IllegalArgumentException iae) {
-      LOG.fatal("Please fix invalid configuration for "
-        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
-      throw iae;
-    }
-    // Make sure cluster ID exists
-    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
-        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
-      FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
-    }
-    clusterId = FSUtils.getClusterId(fs, rd);
-
-    // Make sure the meta region exists!
-    bootstrapMeta();
-  }
-
-  // ==========================================================================
-  //  PUBLIC
-  // ==========================================================================
-  public static MasterFileSystem open(Configuration conf, boolean bootstrap)
-      throws IOException {
-    return open(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf), bootstrap);
-  }
-
-  public static MasterFileSystem open(Configuration conf, FileSystem fs,
-      Path rootDir, boolean bootstrap) throws IOException {
-    // Cover both bases, the old way of setting default fs and the new.
-    // We're supposed to run on 0.20 and 0.21 anyways.
-    fs = rootDir.getFileSystem(conf);
-    FSUtils.setFsDefault(conf, new Path(fs.getUri()));
-    // make sure the fs has the same conf
-    fs.setConf(conf);
-
-    MasterFileSystem mfs = getInstance(conf, fs, rootDir);
-    if (bootstrap) {
-      mfs.bootstrap();
-    }
-    HFileSystem.addLocationsOrderInterceptor(conf);
-    return mfs;
-  }
-
-  private static MasterFileSystem getInstance(Configuration conf, final FileSystem fs,
-      Path rootDir) throws IOException {
-    String fsType = conf.get("hbase.fs.layout.type", "legacy").toLowerCase();
-    switch (fsType) {
-      case "legacy":
-        return new LegacyMasterFileSystem(conf, fs, rootDir);
-      default:
-        throw new IOException("Invalid filesystem type " + fsType);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
new file mode 100644
index 0000000..7b62dea
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
@@ -0,0 +1,271 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.fs.legacy.LegacyMasterFileSystem;
+import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
+  private static Log LOG = LogFactory.getLog(MasterStorage.class);
+
+  // Persisted unique cluster ID
+  private ClusterId clusterId;
+
+
+  private Configuration conf;
+  private FileSystem fs;  // TODO: definitely remove
+  private IDENTIFIER rootContainer;
+
+  protected MasterStorage(Configuration conf, FileSystem fs, IDENTIFIER rootContainer) {
+    this.rootContainer = rootContainer;
+    this.conf = conf;
+    this.fs = fs;
+  }
+
+  public Configuration getConfiguration() { return conf; }
+  public FileSystem getFileSystem() { return fs; }  // TODO: definitely remove
+  public IDENTIFIER getRootContainer() { return rootContainer; }
+
+  // ==========================================================================
+  //  PUBLIC Interfaces - Visitors
+  // ==========================================================================
+  public interface NamespaceVisitor {
+    void visitNamespace(String namespace) throws IOException;
+  }
+
+  public interface TableVisitor {
+    void visitTable(TableName tableName) throws IOException;
+  }
+
+  public interface RegionVisitor {
+    void visitRegion(HRegionInfo regionInfo) throws IOException;
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - Namespace related
+  // ==========================================================================
+  public abstract void createNamespace(NamespaceDescriptor nsDescriptor) throws IOException;
+  public abstract void deleteNamespace(String namespaceName) throws IOException;
+  public abstract Collection<String> getNamespaces(StorageContext ctx) throws IOException;
+
+  public Collection<String> getNamespaces() throws IOException {
+    return getNamespaces(StorageContext.DATA);
+  }
+  // should return or get a NamespaceDescriptor? how is that different from HTD?
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table Descriptor related
+  // ==========================================================================
+  public HTableDescriptor getTableDescriptor(TableName tableName)
+      throws IOException {
+    return getTableDescriptor(StorageContext.DATA, tableName);
+  }
+
+  public boolean createTableDescriptor(HTableDescriptor tableDesc, boolean force)
+      throws IOException {
+    return createTableDescriptor(StorageContext.DATA, tableDesc, force);
+  }
+
+  public void updateTableDescriptor(HTableDescriptor tableDesc) throws IOException {
+    updateTableDescriptor(StorageContext.DATA, tableDesc);
+  }
+
+  public abstract HTableDescriptor getTableDescriptor(StorageContext ctx, TableName tableName)
+      throws IOException;
+  public abstract boolean createTableDescriptor(StorageContext ctx, HTableDescriptor tableDesc,
+                                                boolean force) throws IOException;
+  public abstract void updateTableDescriptor(StorageContext ctx, HTableDescriptor tableDesc)
+      throws IOException;
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table related
+  // ==========================================================================
+  public void deleteTable(TableName tableName) throws IOException {
+    deleteTable(StorageContext.DATA, tableName);
+  }
+
+  public Collection<TableName> getTables(String namespace) throws IOException {
+    return getTables(StorageContext.DATA, namespace);
+  }
+
+  public abstract void deleteTable(StorageContext ctx, TableName tableName) throws IOException;
+
+  public abstract Collection<TableName> getTables(StorageContext ctx, String namespace)
+    throws IOException;
+
+  public Collection<TableName> getTables() throws IOException {
+    ArrayList<TableName> tables = new ArrayList<TableName>();
+    for (String ns: getNamespaces()) {
+      tables.addAll(getTables(ns));
+    }
+    return tables;
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table Region related
+  // ==========================================================================
+  public void deleteRegion(HRegionInfo regionInfo) throws IOException {
+    RegionStorage.destroy(conf, regionInfo);
+  }
+
+  public Collection<HRegionInfo> getRegions(TableName tableName) throws IOException {
+    return getRegions(StorageContext.DATA, tableName);
+  }
+
+  public abstract Collection<HRegionInfo> getRegions(StorageContext ctx, TableName tableName)
+    throws IOException;
+
+  // TODO: Move in HRegionStorage
+  public void deleteFamilyFromStorage(HRegionInfo regionInfo, byte[] familyName, boolean hasMob)
+      throws IOException {
+    getRegionStorage(regionInfo).deleteFamily(Bytes.toString(familyName), hasMob);
+  }
+
+  public RegionStorage getRegionStorage(HRegionInfo regionInfo) throws IOException {
+    return RegionStorage.open(conf, regionInfo, false);
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - visitors
+  // ==========================================================================
+  public void visitStoreFiles(StoreFileVisitor visitor)
+      throws IOException {
+    visitStoreFiles(StorageContext.DATA, visitor);
+  }
+
+  public void visitStoreFiles(String namespace, StoreFileVisitor visitor)
+      throws IOException {
+    visitStoreFiles(StorageContext.DATA, namespace, visitor);
+  }
+
+  public void visitStoreFiles(TableName table, StoreFileVisitor visitor)
+      throws IOException {
+    visitStoreFiles(StorageContext.DATA, table, visitor);
+  }
+
+  public void visitStoreFiles(StorageContext ctx, StoreFileVisitor visitor)
+      throws IOException {
+    for (String namespace: getNamespaces()) {
+      visitStoreFiles(ctx, namespace, visitor);
+    }
+  }
+
+  public void visitStoreFiles(StorageContext ctx, String namespace, StoreFileVisitor visitor)
+      throws IOException {
+    for (TableName tableName: getTables(namespace)) {
+      visitStoreFiles(ctx, tableName, visitor);
+    }
+  }
+
+  public void visitStoreFiles(StorageContext ctx, TableName table, StoreFileVisitor visitor)
+      throws IOException {
+    for (HRegionInfo hri: getRegions(ctx, table)) {
+      RegionStorage.open(conf, hri, false).visitStoreFiles(visitor);
+    }
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - bootstrap
+  // ==========================================================================
+  public abstract IDENTIFIER getTempContainer();
+
+  public abstract void logStorageState(Log log) throws IOException;
+
+  /**
+   * @return The unique identifier generated for this cluster
+   */
+  public ClusterId getClusterId() {
+    return clusterId;
+  }
+
+  /**
+   * Bootstrap MasterStorage
+   * @throws IOException
+   */
+  protected void bootstrap() throws IOException {
+    // Initialize
+    clusterId = startup();
+
+    // Make sure the meta region exists!
+    bootstrapMeta();
+
+    // check if temp directory exists and clean it
+    startupCleanup();
+  }
+
+  protected abstract ClusterId startup() throws IOException;
+  protected abstract void bootstrapMeta() throws IOException;
+  protected abstract void startupCleanup() throws IOException;
+
+  // ==========================================================================
+  //  PUBLIC
+  // ==========================================================================
+  public static MasterStorage open(Configuration conf, boolean bootstrap)
+      throws IOException {
+    return open(conf, FSUtils.getRootDir(conf), bootstrap);
+  }
+
+  public static MasterStorage open(Configuration conf, Path rootDir, boolean bootstrap)
+      throws IOException {
+    // Cover both bases, the old way of setting default fs and the new.
+    // We're supposed to run on 0.20 and 0.21 anyway.
+    FileSystem fs = rootDir.getFileSystem(conf);
+    FSUtils.setFsDefault(conf, new Path(fs.getUri()));
+    // make sure the fs has the same conf
+    fs.setConf(conf);
+
+    MasterStorage ms = getInstance(conf, fs, rootDir);
+    if (bootstrap) {
+      ms.bootstrap();
+    }
+    HFileSystem.addLocationsOrderInterceptor(conf);
+    return ms;
+  }
+
+  private static MasterStorage getInstance(Configuration conf, final FileSystem fs,
+                                           Path rootDir) throws IOException {
+    String storageType = conf.get("hbase.storage.type", "legacy").toLowerCase();
+    switch (storageType) {
+      case "legacy":
+        return new LegacyMasterFileSystem(conf, fs, new LegacyPathIdentifier(rootDir));
+      default:
+        throw new IOException("Invalid filesystem type " + storageType);
+    }
+  }
+}
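
A rough usage sketch of the new API (method names taken from the file above; checked
IOExceptions and error handling are elided, and the wildcard type stands in for a
concrete layout):

    MasterStorage<?> ms = MasterStorage.open(conf, false);
    for (String ns : ms.getNamespaces()) {          // StorageContext.DATA by default
      for (TableName table : ms.getTables(ns)) {
        HTableDescriptor htd = ms.getTableDescriptor(table);
        // inspect htd ...
      }
    }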

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
index ba7d0bf..990b00b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
@@ -20,20 +20,15 @@
 package org.apache.hadoop.hbase.fs;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
@@ -43,62 +38,14 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobUtils;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.FSUtilsWithRetries;
-import org.apache.hadoop.hbase.fs.FsContext;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MetaUtils;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobUtils;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.fs.HFileSystem;
+
 import org.apache.hadoop.hbase.regionserver.*;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
 @InterfaceAudience.Private
 public abstract class RegionStorage<IDENTIFIER extends StorageIdentifier> {
@@ -331,12 +278,12 @@ public abstract class RegionStorage<IDENTIFIER extends StorageIdentifier> {
 
   private static RegionStorage getInstance(Configuration conf, FileSystem fs,
       StorageIdentifier rootContainer, StorageIdentifier regionContainer) throws IOException {
-    String fsType = conf.get("hbase.fs.storage.type", "legacy").toLowerCase();
-    switch (fsType) {
+    String storageType = conf.get("hbase.storage.type", "legacy").toLowerCase();
+    switch (storageType) {
       case "legacy":
         return new LegacyRegionStorage(conf, fs, (LegacyPathIdentifier)rootContainer, (LegacyPathIdentifier)regionContainer);
       default:
-        throw new IOException("Invalid filesystem type " + fsType);
+        throw new IOException("Invalid filesystem type " + storageType);
     }
   }
 }
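
Both factories now key off the same "hbase.storage.type" property; a hedged example of
pinning the layout explicitly (per the switch above, "legacy" is the default and the
only value this commit wires up; anything else throws an IOException):

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.storage.type", "legacy");
    MasterStorage<?> ms = MasterStorage.open(conf, false);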

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
new file mode 100644
index 0000000..cc324a9
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
@@ -0,0 +1,28 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+public enum StorageContext {
+  TEMP,
+  DATA,
+  ARCHIVE,
+  SNAPSHOT,
+  SIDELINE,
+}
\ No newline at end of file
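
The enum replaces FsContext one-for-one and threads through most MasterStorage methods
as a scope selector; for instance (signatures from MasterStorage above, the SNAPSHOT
lookup shown only to illustrate the ctx parameter):

    MasterStorage<?> ms = MasterStorage.open(conf, false);
    Collection<String> data = ms.getNamespaces();   // defaults to StorageContext.DATA
    Collection<String> snap = ms.getNamespaces(StorageContext.SNAPSHOT);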

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/HFileArchiveUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/HFileArchiveUtil.java
index 2fbaa92..e422861 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/HFileArchiveUtil.java
@@ -133,7 +133,7 @@ public class HFileArchiveUtil {
 
   /**
    * Get the full path to the archive directory on the configured 
-   * {@link org.apache.hadoop.hbase.fs.MasterFileSystem}
+   * {@link org.apache.hadoop.hbase.fs.MasterStorage}
    * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
    *          testing: requires a FileSystem root directory to be specified.
    * @return the full {@link Path} to the archive directory, as defined by the configuration
@@ -145,7 +145,7 @@ public class HFileArchiveUtil {
 
   /**
    * Get the full path to the archive directory on the configured 
-   * {@link org.apache.hadoop.hbase.fs.MasterFileSystem}
+   * {@link org.apache.hadoop.hbase.fs.MasterStorage}
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
    * @return the full {@link Path} to the archive directory, as defined by the configuration

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
index 5cf363b..caa7ee5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
@@ -34,15 +34,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.FsContext;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.StorageContext;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -52,7 +49,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 
 @InterfaceAudience.Private
-public class LegacyMasterFileSystem extends MasterFileSystem {
+public class LegacyMasterFileSystem extends MasterStorage<LegacyPathIdentifier> {
   private static final Log LOG = LogFactory.getLog(LegacyMasterFileSystem.class);
 
   private final Path sidelineDir;
@@ -82,18 +79,18 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
 
   private final boolean isSecurityEnabled;
 
-  public LegacyMasterFileSystem(Configuration conf, FileSystem fs, Path rootDir) {
+  public LegacyMasterFileSystem(Configuration conf, FileSystem fs, LegacyPathIdentifier rootDir) {
     super(conf, fs, rootDir);
 
     // base directories
-    this.sidelineDir = LegacyLayout.getSidelineDir(rootDir);
-    this.snapshotDir = LegacyLayout.getSnapshotDir(rootDir);
-    this.archiveDir = LegacyLayout.getArchiveDir(rootDir);
+    this.sidelineDir = LegacyLayout.getSidelineDir(rootDir.path);
+    this.snapshotDir = LegacyLayout.getSnapshotDir(rootDir.path);
+    this.archiveDir = LegacyLayout.getArchiveDir(rootDir.path);
     this.archiveDataDir = LegacyLayout.getDataDir(this.archiveDir);
-    this.dataDir = LegacyLayout.getDataDir(rootDir);
-    this.tmpDir = LegacyLayout.getTempDir(rootDir);
+    this.dataDir = LegacyLayout.getDataDir(rootDir.path);
+    this.tmpDir = LegacyLayout.getTempDir(rootDir.path);
     this.tmpDataDir = LegacyLayout.getDataDir(this.tmpDir);
-    this.bulkDir = LegacyLayout.getBulkDir(rootDir);
+    this.bulkDir = LegacyLayout.getBulkDir(rootDir.path);
 
     this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
     this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
@@ -103,12 +100,12 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   //  PUBLIC Methods - Namespace related
   // ==========================================================================
   public void createNamespace(NamespaceDescriptor nsDescriptor) throws IOException {
-    getFileSystem().mkdirs(getNamespaceDir(FsContext.DATA, nsDescriptor.getName()));
+    getFileSystem().mkdirs(getNamespaceDir(StorageContext.DATA, nsDescriptor.getName()));
   }
 
   public void deleteNamespace(String namespaceName) throws IOException {
     FileSystem fs = getFileSystem();
-    Path nsDir = getNamespaceDir(FsContext.DATA, namespaceName);
+    Path nsDir = getNamespaceDir(StorageContext.DATA, namespaceName);
 
     try {
       for (FileStatus status : fs.listStatus(nsDir)) {
@@ -125,7 +122,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
     }
   }
 
-  public Collection<String> getNamespaces(FsContext ctx) throws IOException {
+  public Collection<String> getNamespaces(StorageContext ctx) throws IOException {
     FileStatus[] stats = FSUtils.listStatus(getFileSystem(), getNamespaceDir(ctx));
     if (stats == null) return Collections.emptyList();
 
@@ -142,20 +139,20 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   //  PUBLIC Methods - Table Descriptor related
  // ==========================================================================
   @Override
-  public boolean createTableDescriptor(FsContext ctx, HTableDescriptor tableDesc, boolean force)
+  public boolean createTableDescriptor(StorageContext ctx, HTableDescriptor tableDesc, boolean force)
       throws IOException {
     return LegacyTableDescriptor.createTableDescriptor(getFileSystem(),
       getTableDir(ctx, tableDesc.getTableName()), tableDesc, force);
   }
 
   @Override
-  public void updateTableDescriptor(FsContext ctx, HTableDescriptor tableDesc) throws IOException {
+  public void updateTableDescriptor(StorageContext ctx, HTableDescriptor tableDesc) throws IOException {
     LegacyTableDescriptor.updateTableDescriptor(getFileSystem(),
         getTableDir(ctx, tableDesc.getTableName()), tableDesc);
   }
 
   @Override
-  public HTableDescriptor getTableDescriptor(FsContext ctx, TableName tableName)
+  public HTableDescriptor getTableDescriptor(StorageContext ctx, TableName tableName)
       throws IOException {
     return LegacyTableDescriptor.getTableDescriptorFromFs(
         getFileSystem(), getTableDir(ctx, tableName));
@@ -165,7 +162,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   //  PUBLIC Methods - Table related
   // ==========================================================================
   @Override
-  public void deleteTable(FsContext ctx, TableName tableName) throws IOException {
+  public void deleteTable(StorageContext ctx, TableName tableName) throws IOException {
     Path tableDir = getTableDir(ctx, tableName);
     if (!FSUtils.deleteDirectory(getFileSystem(), tableDir)) {
       throw new IOException("Failed delete of " + tableName);
@@ -173,7 +170,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   }
 
   @Override
-  public Collection<TableName> getTables(FsContext ctx, String namespace)
+  public Collection<TableName> getTables(StorageContext ctx, String namespace)
       throws IOException {
     FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
         getNamespaceDir(ctx, namespace), new FSUtils.UserTableDirFilter(getFileSystem()));
@@ -190,7 +187,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   //  PUBLIC Methods - Table Regions related
   // ==========================================================================
   @Override
-  public Collection<HRegionInfo> getRegions(FsContext ctx, TableName tableName)
+  public Collection<HRegionInfo> getRegions(StorageContext ctx, TableName tableName)
       throws IOException {
     FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
         getTableDir(ctx, tableName), new FSUtils.RegionDirFilter(getFileSystem()));
@@ -215,10 +212,96 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   // ==========================================================================
   //  PROTECTED Methods - Bootstrap
   // ==========================================================================
+
+  /**
+   * Create initial layout in filesystem.
+   * <ol>
+   * <li>Check if the meta region exists and is readable; if not, create it.
+   * Create hbase.version and the hbase:meta directory if they do not exist.
+   * </li>
+   * <li>Create a log archive directory for RS to put archived logs</li>
+   * </ol>
+   * Idempotent.
+   * @throws IOException
+   */
+  @Override
+  protected ClusterId startup() throws IOException {
+    Configuration c = getConfiguration();
+    Path rc = ((LegacyPathIdentifier)getRootContainer()).path;
+    FileSystem fs = getFileSystem();
+
+    // If FS is in safe mode wait till out of it.
+    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
+
+    boolean isSecurityEnabled = "kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
+    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", "700"));
+
+    // Filesystem is good. Go ahead and check for hbase.rootdir.
+    try {
+      if (!fs.exists(rc)) {
+        if (isSecurityEnabled) {
+          fs.mkdirs(rc, rootDirPerms);
+        } else {
+          fs.mkdirs(rc);
+        }
+        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
+        // We used to handle this by checking the current DN count and waiting until
+        // it is nonzero. With security, the check for datanode count doesn't work --
+        // it is a privileged op. So instead we adopt the strategy of the jobtracker
+        // and simply retry file creation during bootstrap indefinitely. As soon as
+        // there is one datanode it will succeed. Permission problems should have
+        // already been caught by mkdirs above.
+        FSUtils.setVersion(fs, rc, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
+            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+      } else {
+        if (!fs.isDirectory(rc)) {
+          throw new IllegalArgumentException(rc.toString() + " is not a directory");
+        }
+        if (isSecurityEnabled && !rootDirPerms.equals(fs.getFileStatus(rc).getPermission())) {
+          // check whether the permission match
+          LOG.warn("Found rootdir permissions NOT matching expected \"hbase.rootdir.perms\" for "
+              + "rootdir=" + rc.toString() + " permissions=" + fs.getFileStatus(rc).getPermission()
+              + " and  \"hbase.rootdir.perms\" configured as "
+              + c.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You"
+              + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
+              + "and restarting the master");
+          fs.setPermission(rc, rootDirPerms);
+        }
+        // as above
+        FSUtils.checkVersion(fs, rc, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
+            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+      }
+    } catch (DeserializationException de) {
+      LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
+      IOException ioe = new IOException();
+      ioe.initCause(de);
+      throw ioe;
+    } catch (IllegalArgumentException iae) {
+      LOG.fatal("Please fix invalid configuration for "
+          + HConstants.HBASE_DIR + " " + rc.toString(), iae);
+      throw iae;
+    }
+    // Make sure cluster ID exists
+    if (!FSUtils.checkClusterIdExists(fs, rc, c.getInt(
+        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
+      FSUtils.setClusterId(fs, rc, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10
+          * 1000));
+    }
+    return FSUtils.getClusterId(fs, rc);
+  }
+
+  @Override
+  public void logStorageState(Log log) throws IOException {
+    FSUtils.logFileSystemState(getFileSystem(), ((LegacyPathIdentifier)getRootContainer()).path,
+        log);
+  }
+
   @Override
   protected void bootstrapMeta() throws IOException {
     // TODO ask RegionStorage
-    if (!FSUtils.metaRegionExists(getFileSystem(), getRootDir())) {
+    if (!FSUtils.metaRegionExists(getFileSystem(), getRootContainer().path)) {
       bootstrapMeta(getConfiguration());
     }
 
@@ -254,7 +337,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   protected void startupCleanup() throws IOException {
     final FileSystem fs = getFileSystem();
     // Check the directories under rootdir.
-    checkTempDir(getTempDir(), getConfiguration(), fs);
+    checkTempDir(getTempContainer().path, getConfiguration(), getFileSystem());
     final String[] protectedSubDirs = new String[] {
         HConstants.BASE_NAMESPACE_DIR,
         HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -266,7 +349,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
         MobConstants.MOB_DIR_NAME
     };
     for (String subDir : protectedSubDirs) {
-      checkSubDir(new Path(getRootDir(), subDir));
+      checkSubDir(new Path(getRootContainer().path, subDir));
     }
 
     checkStagingDir();
@@ -274,17 +357,17 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
     // Handle the last few special files and set the final rootDir permissions
     // rootDir needs 'x' for all to support bulk load staging dir
     if (isSecurityEnabled) {
-      fs.setPermission(new Path(getRootDir(), HConstants.VERSION_FILE_NAME), secureRootFilePerms);
-      fs.setPermission(new Path(getRootDir(), HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(getRootContainer().path, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(getRootContainer().path, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
     }
-    FsPermission currentRootPerms = fs.getFileStatus(getRootDir()).getPermission();
+    FsPermission currentRootPerms = fs.getFileStatus(getRootContainer().path).getPermission();
     if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
         || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
         || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
       LOG.warn("rootdir permissions do not contain 'excute' for user, group or other. "
         + "Automatically adding 'excute' permission for all");
       fs.setPermission(
-        getRootDir(),
+        getRootContainer().path,
         new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE), currentRootPerms
             .getGroupAction().or(FsAction.EXECUTE), currentRootPerms.getOtherAction().or(
           FsAction.EXECUTE)));
@@ -303,7 +386,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
       // if not the cleaner will take care of them.
       for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
         for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
-          HFileArchiver.archiveRegion(fs, getRootDir(), tabledir, regiondir);
+          HFileArchiver.archiveRegion(fs, getRootContainer().path, tabledir, regiondir);
         }
       }
       if (!fs.delete(tmpdir, true)) {
@@ -361,7 +444,7 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
    */
   private void checkStagingDir() throws IOException {
     final FileSystem fs = getFileSystem();
-    Path p = new Path(getRootDir(), HConstants.BULKLOAD_STAGING_DIR_NAME);
+    Path p = new Path(getRootContainer().path, HConstants.BULKLOAD_STAGING_DIR_NAME);
     try {
       if (!fs.exists(p)) {
         if (!fs.mkdirs(p, HiddenDirPerms)) {
@@ -380,27 +463,28 @@ public class LegacyMasterFileSystem extends MasterFileSystem {
   // ==========================================================================
   //  PROTECTED Methods - Path
   // ==========================================================================
-  protected Path getNamespaceDir(FsContext ctx) {
+  protected Path getNamespaceDir(StorageContext ctx) {
     return getBaseDirFromContext(ctx);
   }
 
-  protected Path getNamespaceDir(FsContext ctx, String namespace) {
+  protected Path getNamespaceDir(StorageContext ctx, String namespace) {
     return LegacyLayout.getNamespaceDir(getBaseDirFromContext(ctx), namespace);
   }
 
-  protected Path getTableDir(FsContext ctx, TableName table) {
+  protected Path getTableDir(StorageContext ctx, TableName table) {
     return LegacyLayout.getTableDir(getBaseDirFromContext(ctx), table);
   }
 
-  protected Path getRegionDir(FsContext ctx, TableName table, HRegionInfo hri) {
+  protected Path getRegionDir(StorageContext ctx, TableName table, HRegionInfo hri) {
     return LegacyLayout.getRegionDir(getTableDir(ctx, table), hri);
   }
 
-  public Path getTempDir() {
-    return tmpDir;
+  @Override
+  public LegacyPathIdentifier getTempContainer() {
+    return new LegacyPathIdentifier(tmpDir);
   }
 
-  protected Path getBaseDirFromContext(FsContext ctx) {
+  protected Path getBaseDirFromContext(StorageContext ctx) {
     switch (ctx) {
       case TEMP: return tmpDataDir;
       case DATA: return dataDir;

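As a quick illustration of the StorageContext dispatch in the hunk above, here is a
self-contained sketch; the directory values are placeholders, not the layout
LegacyMasterFileSystem actually computes:

import org.apache.hadoop.fs.Path;

public class StorageContextSketch {
  // Mirrors the new StorageContext enum (TEMP and DATA are the cases shown
  // in the hunk above; the real enum may carry more values).
  enum StorageContext { TEMP, DATA }

  private final Path tmpDataDir = new Path("/hbase/.tmp/data"); // placeholder
  private final Path dataDir = new Path("/hbase/data");         // placeholder

  // Same shape as getBaseDirFromContext(StorageContext) in the patch:
  // each context resolves to one concrete base directory.
  Path getBaseDirFromContext(StorageContext ctx) {
    switch (ctx) {
      case TEMP: return tmpDataDir;
      case DATA: return dataDir;
      default: throw new IllegalArgumentException("unhandled context: " + ctx);
    }
  }

  public static void main(String[] args) {
    StorageContextSketch s = new StorageContextSketch();
    System.out.println(s.getBaseDirFromContext(StorageContext.DATA)); // /hbase/data
  }
}
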
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index afddaf6..6f958f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -207,7 +208,7 @@ public class CatalogJanitor extends ScheduledChore {
           + regionB.getRegionNameAsString()
           + " from fs because merged region no longer holds references");
       // TODO update HFileArchiver to use RegionStorage
-      FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
+      FileSystem fs = this.services.getMasterStorage().getFileSystem();
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
       MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), mergedRegion);
@@ -353,7 +354,7 @@ public class CatalogJanitor extends ScheduledChore {
     if (hasNoReferences(a) && hasNoReferences(b)) {
       LOG.debug("Deleting region " + parent.getRegionNameAsString() +
         " because daughter splits no longer hold references");
-      FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
+      FileSystem fs = this.services.getMasterStorage().getFileSystem();
       if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
       MetaTableAccessor.deleteRegion(this.connection, parent);
@@ -389,8 +390,9 @@ public class CatalogJanitor extends ScheduledChore {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
 
-    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
-    Path rootdir = this.services.getMasterFileSystem().getRootDir();
+    FileSystem fs = this.services.getMasterStorage().getFileSystem();
+    Path rootdir = ((LegacyPathIdentifier) this.services.getMasterStorage().getRootContainer())
+        .path;
     Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
 
     Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());

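The CatalogJanitor change above is the recurring idiom in this patch: code that
still needs a raw Path unwraps the StorageIdentifier by casting to
LegacyPathIdentifier. A minimal sketch of that unwrap, assuming (as the hunks
suggest) that LegacyPathIdentifier exposes a public 'path' field; the types here
are stand-ins, not the real interfaces:

import org.apache.hadoop.fs.Path;

// Stand-ins for the real interfaces; shapes inferred from the diff.
interface StorageIdentifier { }

final class LegacyPathIdentifier implements StorageIdentifier {
  public final Path path;
  public LegacyPathIdentifier(Path path) { this.path = path; }
}

class UnwrapExample {
  // Equivalent to:
  //   ((LegacyPathIdentifier) services.getMasterStorage().getRootContainer()).path
  static Path rootDir(StorageIdentifier rootContainer) {
    return ((LegacyPathIdentifier) rootContainer).path;
  }

  public static void main(String[] args) {
    StorageIdentifier root = new LegacyPathIdentifier(new Path("/hbase"));
    System.out.println(rootDir(root)); // /hbase
  }
}
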
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 194e023..41c25e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -163,7 +163,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
     // This could be extended in the future, for example if we want to send stuff like the
     //  hbase:meta server name.
     ClusterStatus cs = new ClusterStatus(VersionInfo.getVersion(),
-        master.getMasterFileSystem().getClusterId().toString(),
+        master.getMasterStorage().getClusterId().toString(),
         null,
         sns,
         master.getServerName(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c9056e3..9181579 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -89,8 +89,9 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
@@ -119,7 +120,6 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.Procedu
 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.mob.MobConstants;
@@ -142,8 +142,6 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -164,7 +162,6 @@ import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
 import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
@@ -275,8 +272,8 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   // Metrics for the HMaster
   final MetricsMaster metricsMaster;
-  // file system manager for the master FS operations
-  private MasterFileSystem fileSystemManager;
+  // storage manager for the master storage operations
+  private MasterStorage storageManager;
   private MasterWalManager walManager;
 
   // server manager to deal with region server info
@@ -682,7 +679,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     this.masterActiveTime = System.currentTimeMillis();
     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-    this.fileSystemManager = MasterFileSystem.open(conf, true);
+    this.storageManager = MasterStorage.open(conf, true);
     this.walManager = new MasterWalManager(this);
 
     // enable table descriptors cache
@@ -698,7 +695,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     // publish cluster ID
     status.setStatus("Publishing Cluster ID in ZooKeeper");
-    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+    ZKClusterId.setClusterId(this.zooKeeper, storageManager.getClusterId());
     this.initLatch.countDown();
 
     this.serverManager = createServerManager(this);
@@ -922,8 +919,8 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public MasterFileSystem getMasterFileSystem() {
-    return this.fileSystemManager;
+  public MasterStorage getMasterStorage() {
+    return this.storageManager;
   }
 
   @Override
@@ -976,7 +973,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
     Map<String, Object> params = new HashMap<String, Object>();
     params.put(MASTER, this);
-    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
+    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterStorage()
         .getFileSystem(), archiveDir, params);
     getChoreService().scheduleChore(hfileCleaner);
     serviceStarted = true;
@@ -1030,10 +1027,10 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   private void startProcedureExecutor() throws IOException {
     final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
-    final Path logDir = new Path(fileSystemManager.getRootDir(),
+    final Path logDir = new Path(((LegacyPathIdentifier) storageManager.getRootContainer()).path,
         MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
 
-    procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
+    procedureStore = new WALProcedureStore(conf, storageManager.getFileSystem(), logDir,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
     procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
     procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
@@ -2133,8 +2130,8 @@ public class HMaster extends HRegionServer implements MasterServices {
         }});
     }
 
-    String clusterId = fileSystemManager != null ?
-      fileSystemManager.getClusterId().toString() : null;
+    String clusterId = storageManager != null ?
+      storageManager.getClusterId().toString() : null;
     Set<RegionState> regionsInTransition = assignmentManager != null ?
       assignmentManager.getRegionStates().getRegionsInTransition() : null;
     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 3dfe4a6..2a6704d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
@@ -68,9 +68,9 @@ public interface MasterServices extends Server {
   AssignmentManager getAssignmentManager();
 
   /**
-   * @return Master's filesystem {@link MasterFileSystem} utility class.
+   * @return Master's storage {@link MasterStorage} utility class.
    */
-  MasterFileSystem getMasterFileSystem();
+  MasterStorage getMasterStorage();
 
   /**
    * @return Master's WALs {@link MasterWalManager} utility class.

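As a usage note, call sites migrate mechanically: every getMasterFileSystem()
becomes getMasterStorage(), and the FileSystem handle is still reachable through
it. A hedged before/after sketch that compiles only against the patched tree
(MasterServices is the real interface; the helper class is hypothetical):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.master.MasterServices;

class AccessorMigration {
  // before: services.getMasterFileSystem().getFileSystem()
  // after (per this patch):
  static FileSystem fileSystemOf(MasterServices services) {
    return services.getMasterStorage().getFileSystem();
  }
}
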
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index e90b744..28b4c63 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -79,7 +81,7 @@ public class MasterWalManager {
 
   // The Path to the old logs dir
   private final Path oldLogDir;
-  private final Path rootDir;
+  private final StorageIdentifier rootContainer;
 
   // create the split log lock
   private final Lock splitLogLock = new ReentrantLock();
@@ -90,20 +92,21 @@ public class MasterWalManager {
   private volatile boolean fsOk = true;
 
   public MasterWalManager(MasterServices services) throws IOException {
-    this(services.getConfiguration(), services.getMasterFileSystem().getFileSystem(),
-      services.getMasterFileSystem().getRootDir(), services);
+    this(services.getConfiguration(), services.getMasterStorage().getFileSystem(),
+      services.getMasterStorage().getRootContainer(), services);
   }
 
-  public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, MasterServices services)
-      throws IOException {
+  public MasterWalManager(Configuration conf, FileSystem fs, StorageIdentifier rootContainer,
+                          MasterServices services) throws IOException {
     this.fs = fs;
     this.conf = conf;
-    this.rootDir = rootDir;
+    this.rootContainer = rootContainer;
     this.services = services;
     this.splitLogManager = new SplitLogManager(services, conf);
     this.distributedLogReplay = this.splitLogManager.isLogReplaying();
 
-    this.oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+    this.oldLogDir = new Path(((LegacyPathIdentifier) rootContainer).path,
+        HConstants.HREGION_OLDLOGDIR_NAME);
   }
 
   public void stop() {
@@ -156,7 +159,8 @@ public class MasterWalManager {
         WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
 
     Set<ServerName> serverNames = new HashSet<ServerName>();
-    Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME);
+    Path logsDirPath = new Path(((LegacyPathIdentifier) this.rootContainer).path,
+        HConstants.HREGION_LOGDIR_NAME);
 
     do {
       if (services.isStopped()) {
@@ -254,7 +258,7 @@ public class MasterWalManager {
     }
     try {
       for (ServerName serverName : serverNames) {
-        Path logDir = new Path(this.rootDir,
+        Path logDir = new Path(((LegacyPathIdentifier) this.rootContainer).path,
           AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
         Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
         // Rename the directory so a rogue RS doesn't create more WALs

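The MasterWalManager hunks reduce to one pattern: unwrap the root container once,
then derive the WAL directories from it. A sketch follows; the directory name
literals are assumptions standing in for the HConstants values used above:

import org.apache.hadoop.fs.Path;

class WalDirsSketch {
  // Assumed stand-ins for HConstants.HREGION_OLDLOGDIR_NAME and
  // HConstants.HREGION_LOGDIR_NAME.
  static final String OLD_LOG_DIR_NAME = "oldWALs";
  static final String LOG_DIR_NAME = "WALs";

  static Path oldLogDir(Path root) { return new Path(root, OLD_LOG_DIR_NAME); }
  static Path logsDir(Path root)   { return new Path(root, LOG_DIR_NAME); }

  public static void main(String[] args) {
    Path root = new Path("/hbase");
    System.out.println(oldLogDir(root)); // /hbase/oldWALs
    System.out.println(logsDir(root));   // /hbase/WALs
  }
}
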
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index e875f30..8b70685 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -52,7 +49,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -679,7 +675,7 @@ public class RegionStates {
         // This is a cleanup task. Not critical.
         if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) == null) {
           regionOffline(hri);
-          server.getMasterFileSystem().deleteRegion(hri);
+          server.getMasterStorage().deleteRegion(hri);
         }
       } catch (IOException e) {
         LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index f52e321..f3cabbd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -41,8 +40,10 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.fs.FsContext;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.StorageContext;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions;
@@ -52,7 +53,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -308,13 +308,13 @@ public class CloneSnapshotProcedure
       throws IOException, InterruptedException {
     if (!getTableName().isSystemTable()) {
       // Check and update namespace quota
-      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+      final MasterStorage ms = env.getMasterServices().getMasterStorage();
 
       SnapshotManifest manifest = SnapshotManifest.open(
         env.getMasterConfiguration(),
-        mfs.getFileSystem(),
-        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
-        snapshot);
+        ms.getFileSystem(),
+        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot,
+            ((LegacyPathIdentifier) ms.getRootContainer()).path), snapshot);
 
       ProcedureSyncWait.getMasterQuotaManager(env)
         .checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
@@ -358,9 +358,9 @@ public class CloneSnapshotProcedure
         final Path tableRootDir, final TableName tableName,
         final List<HRegionInfo> newRegions) throws IOException {
 
-        final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-        final FileSystem fs = mfs.getFileSystem();
-        final Path rootDir = mfs.getRootDir();
+        final MasterStorage ms = env.getMasterServices().getMasterStorage();
+        final FileSystem fs = ms.getFileSystem();
+        final StorageIdentifier rootContainer = ms.getRootContainer();
         final Configuration conf = env.getMasterConfiguration();
         final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
 
@@ -368,7 +368,8 @@ public class CloneSnapshotProcedure
 
         try {
           // 1. Execute the on-disk Clone
-          Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+          Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot,
+              ((LegacyPathIdentifier) rootContainer).path);
           SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
           RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
             conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
@@ -413,14 +414,14 @@ public class CloneSnapshotProcedure
     final HTableDescriptor hTableDescriptor,
     List<HRegionInfo> newRegions,
     final CreateHdfsRegions hdfsRegionHandler) throws IOException {
-    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-    final Path tempdir = mfs.getTempDir();
+    final MasterStorage ms = env.getMasterServices().getMasterStorage();
+    final Path tempdir = ((LegacyPathIdentifier) ms.getTempContainer()).path;
 
     // 1. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
     HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
-    mfs.createTableDescriptor(FsContext.TEMP, underConstruction, false);
+    ms.createTableDescriptor(StorageContext.TEMP, underConstruction, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(

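Worth calling out in createFsLayout() above: the table descriptor is written
under StorageContext.TEMP first, so a partially built table never appears under
the live data root. A sketch of that flow under assumed method shapes
(createTableDescriptor here is deliberately simplified from the MasterStorage
signature in the hunk, and the base paths are placeholders):

import java.io.IOException;
import org.apache.hadoop.fs.Path;

class TempThenPromoteSketch {
  enum StorageContext { TEMP, DATA }

  // Simplified stand-in: the real call in the hunk is
  // ms.createTableDescriptor(StorageContext.TEMP, underConstruction, false).
  static void createTableDescriptor(StorageContext ctx, String table) throws IOException {
    Path base = (ctx == StorageContext.TEMP)
        ? new Path("/hbase/.tmp/data")  // placeholder temp base
        : new Path("/hbase/data");      // placeholder live base
    System.out.println("would write descriptor for " + table + " under " + base);
  }

  public static void main(String[] args) throws IOException {
    // 1. build everything under TEMP ...
    createTableDescriptor(StorageContext.TEMP, "ns:example");
    // 2. ... then promote the finished table dir into the DATA context.
  }
}
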
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 7562a04..0b4dc5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.master.TableNamespaceManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to create a new namespace.
@@ -202,7 +201,7 @@ public class CreateNamespaceProcedure
   protected static void createDirectory(
       final MasterProcedureEnv env,
       final NamespaceDescriptor nsDescriptor) throws IOException {
-    env.getMasterServices().getMasterFileSystem().createNamespace(nsDescriptor);
+    env.getMasterServices().getMasterStorage().createNamespace(nsDescriptor);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 196cb16..e44192d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -38,11 +37,11 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.fs.FsContext;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.StorageContext;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
@@ -294,14 +293,14 @@ public class CreateTableProcedure
   protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
       final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
       final CreateHdfsRegions hdfsRegionHandler) throws IOException {
-    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-    final Path tempdir = mfs.getTempDir();
+    final MasterStorage ms = env.getMasterServices().getMasterStorage();
+    final Path tempdir = ((LegacyPathIdentifier) ms.getTempContainer()).path;
 
     // 1. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
     HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
-    mfs.createTableDescriptor(FsContext.TEMP, underConstruction, false);
+    ms.createTableDescriptor(StorageContext.TEMP, underConstruction, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
@@ -317,9 +316,10 @@ public class CreateTableProcedure
     final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor,
     final Path tempTableDir) throws IOException {
-    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-    final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
-    FileSystem fs = mfs.getFileSystem();
+    final MasterStorage ms = env.getMasterServices().getMasterStorage();
+    final Path tableDir = FSUtils.getTableDir(((LegacyPathIdentifier) ms.getRootContainer()).path,
+        hTableDescriptor.getTableName());
+    FileSystem fs = ms.getFileSystem();
 
     if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
       throw new IOException("Couldn't delete " + tableDir);

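One defensive idiom in DeleteFromFs above deserves a note: FileSystem.delete()
returns false both on failure and when the path was already absent, so the code
only raises when the directory still exists afterward. A small sketch of the
same check (the helper name is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DeleteCheckSketch {
  // delete() returning false is only fatal if the path is still there;
  // a false return for an already-missing path is harmless.
  static void deleteOrThrow(FileSystem fs, Path tableDir) throws IOException {
    if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
      throw new IOException("Couldn't delete " + tableDir);
    }
  }
}
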
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
index c6ca374..2069a6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
@@ -25,20 +25,14 @@ import java.io.OutputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
 import org.apache.hadoop.hbase.master.TableNamespaceManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to remove a namespace.
@@ -275,7 +269,7 @@ public class DeleteNamespaceProcedure
   protected static void deleteDirectory(
       final MasterProcedureEnv env,
       final String namespaceName) throws IOException {
-    env.getMasterServices().getMasterFileSystem().deleteNamespace(namespaceName);
+    env.getMasterServices().getMasterStorage().deleteNamespace(namespaceName);
   }
 
   /**