Posted to commits@hbase.apache.org by mb...@apache.org on 2015/09/16 00:25:54 UTC

[3/4] hbase git commit: Layout Abstraction

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
new file mode 100644
index 0000000..5378a69
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.fs.layout.StandardHBaseFsLayout;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class HierarchicalHRegionFileSystem extends HRegionFileSystem {
+  protected HierarchicalHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir,
+      HRegionInfo regionInfo) {
+    super(conf, fs, tableDir, regionInfo);
+  }
+
+  @Override
+  void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws IOException {
+    fs.mkdirs(dest.getParent());
+    super.moveNewRegionFromTmpDirToRegionDir(source, dest);
+  }
+
+  // Not expected to be used in production; only in tests, to compare
+  // humongous vs. standard region dir functionality.
+  @VisibleForTesting
+  public Path getStandardHBaseRegionDir() {
+    return StandardHBaseFsLayout.get().getRegionDir(tableDir, regionInfoForFs.getEncodedName());
+  }
+}

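For orientation: the hierarchical layout inserts a bucket directory between the
table dir and the region dir, which is why the override above only needs to
create the missing parent before the normal move. A minimal sketch of the
expected path math, assuming the bucket is the trailing hex characters of the
encoded region name (as TestHumongousTable below verifies); the helper is
illustrative only, the real logic lives in HierarchicalFsLayout:

    // Standard layout:      <tableDir>/<encodedRegionName>
    // Hierarchical layout:  <tableDir>/<bucket>/<encodedRegionName>,
    // where <bucket> is the last HUMONGOUS_DIR_NAME_SIZE hex chars of the name.
    static Path getHierarchicalRegionDir(Path tableDir, String encodedName, int bucketSize) {
      String bucket = encodedName.substring(encodedName.length() - bucketSize);
      return new Path(new Path(tableDir, bucket), encodedName);
    }
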
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
new file mode 100644
index 0000000..fbca254
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+public class HierarchicalHRegionFileSystemFactory extends HRegionFileSystemFactory {
+  @Override
+  public HRegionFileSystem create(Configuration conf, FileSystem fs, Path tableDir,
+      HRegionInfo regionInfo) {
+    return new HierarchicalHRegionFileSystem(conf, fs, tableDir, regionInfo);
+  }
+}

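This factory is the hook that lets HRegionFileSystem.create (used later in this
patch, e.g. by HBaseFsck) hand back a layout-appropriate subclass. A hedged
sketch of that indirection, assuming FsLayout exposes the active layout's
factory (the accessor name below is a guess):

    public static HRegionFileSystem create(Configuration conf, FileSystem fs,
        Path tableDir, HRegionInfo regionInfo) {
      // Delegate to whichever factory the configured layout supplies.
      return FsLayout.get().getHRegionFileSystemFactory()
          .create(conf, fs, tableDir, regionInfo);
    }
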
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index 95be873..a7f8495 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
@@ -359,35 +358,35 @@ public class SplitTransactionImpl implements SplitTransaction {
 
     transition(SplitTransactionPhase.STARTED_REGION_A_CREATION);
 
-    assertReferenceFileCount(expectedReferences.getFirst(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_a));
+    assertReferenceFileCountOfSplitsDir(expectedReferences.getFirst(), this.hri_a);
     HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
-    assertReferenceFileCount(expectedReferences.getFirst(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName()));
+    assertReferenceFileCountOfDaughterDir(expectedReferences.getFirst(), this.hri_a);
 
     // Ditto
 
     transition(SplitTransactionPhase.STARTED_REGION_B_CREATION);
 
-    assertReferenceFileCount(expectedReferences.getSecond(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_b));
+    assertReferenceFileCountOfSplitsDir(expectedReferences.getSecond(), this.hri_b);
     HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
-    assertReferenceFileCount(expectedReferences.getSecond(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName()));
+    assertReferenceFileCountOfDaughterDir(expectedReferences.getSecond(), this.hri_b);
 
     return new PairOfSameType<Region>(a, b);
   }
-
+  
   @VisibleForTesting
-  void assertReferenceFileCount(int expectedReferenceFileCount, Path dir)
+  void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter)
       throws IOException {
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(parent.getFilesystem(),
-          dir)) {
-      throw new IOException("Failing split. Expected reference file count isn't equal.");
-    }
+    this.parent.getRegionFileSystem().assertReferenceFileCountOfSplitsDir(
+      expectedReferenceFileCount, daughter);
   }
 
+  @VisibleForTesting
+  void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, HRegionInfo daughter)
+      throws IOException {
+    this.parent.getRegionFileSystem().assertReferenceFileCountOfDaughterDir(
+      expectedReferenceFileCount, daughter);
+  }
+  
   /**
    * Perform time consuming opening of the daughter regions.
    * @param server Hosting server instance.  Can be null when testing

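The inline check removed above gives the shape of what the delegated methods
must do inside HRegionFileSystem, where the layout-specific paths are known. A
sketch of the splits-dir variant under that assumption, reusing the deleted
logic:

    void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount,
        HRegionInfo daughter) throws IOException {
      Path splitsDir = getSplitsDir(daughter);
      if (expectedReferenceFileCount != 0 && expectedReferenceFileCount !=
          FSUtils.getRegionReferenceFileCount(getFileSystem(), splitsDir)) {
        throw new IOException("Failing split. Expected reference file count isn't equal.");
      }
    }
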
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 6516a3e..c5ef7fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -27,6 +27,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -63,7 +64,7 @@ public class StoreFileInfo {
    * Group 1, hfile/hfilelink pattern, is this file's id.
    * Group 2 '(.+)' is the reference's parent region name.
    */
-  private static final Pattern REF_NAME_PATTERN =
+  public static final Pattern REF_NAME_PATTERN =
     Pattern.compile(String.format("^(%s|%s)\\.(.+)$",
       HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX));
 
@@ -387,36 +388,38 @@ public class StoreFileInfo {
     Matcher m = REF_NAME_PATTERN.matcher(name);
     return m.matches() && m.groupCount() > 1;
   }
-
+  
   /*
    * Return path to the file referred to by a Reference.  Presumes a directory
    * hierarchy of <code>${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname</code>.
+   * Unless the table is a humongous table, in which case the hierarchy is
+   * <code>${hbase.rootdir}/data/${namespace}/tablename/bucket/regionname/familyname</code>.
+   * 
    * @param p Path to a Reference file.
    * @return Calculated path to parent region file.
    * @throws IllegalArgumentException when path regex fails to match.
    */
-  public static Path getReferredToFile(final Path p) {
-    Matcher m = REF_NAME_PATTERN.matcher(p.getName());
+  public static Path getReferredToFile(Path p) {
+    Matcher m = StoreFileInfo.REF_NAME_PATTERN.matcher(p.getName());
     if (m == null || !m.matches()) {
       LOG.warn("Failed match of store file name " + p.toString());
       throw new IllegalArgumentException("Failed match of store file name " +
           p.toString());
     }
-
+  
     // Other region name is suffix on the passed Reference file name
     String otherRegion = m.group(2);
     // Tabledir is up two directories from where Reference was written.
-    Path tableDir = p.getParent().getParent().getParent();
+    Path regionDir = p.getParent().getParent();
+    Path tableDir = FsLayout.getTableDirFromRegionDir(regionDir);
     String nameStrippedOfSuffix = m.group(1);
     if (LOG.isDebugEnabled()) {
       LOG.debug("reference '" + p + "' to region=" + otherRegion
         + " hfile=" + nameStrippedOfSuffix);
     }
-
-    // Build up new path with the referenced region in place of our current
-    // region in the reference path.  Also strip regionname suffix from name.
-    return new Path(new Path(new Path(tableDir, otherRegion),
-      p.getParent().getName()), nameStrippedOfSuffix);
+  
+    return new Path(new Path(FsLayout.getRegionDir(tableDir, otherRegion), p.getParent()
+          .getName()), nameStrippedOfSuffix);
   }
 
   /**

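A worked example of the resolution above, with made-up names and an assumed
two-character bucket: a reference file is named "<hfile>.<parent region>", and
the parent's region dir is recomputed through FsLayout so it lands in the
parent's own bucket rather than the daughter's:

    // Reference written by a daughter region (hierarchical layout):
    //   /hbase/data/default/t1/ef/aaaa...ffef/cf/abc123.bbbb...ffcd
    // Group 1 = "abc123" (hfile), group 2 = "bbbb...ffcd" (parent region name).
    // Climb two levels to the region dir, derive the table dir via FsLayout, then:
    //   /hbase/data/default/t1/cd/bbbb...ffcd/cf/abc123
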
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 5021c74..3baeeb2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -205,8 +206,11 @@ public class ExportSnapshot extends Configured implements Tool {
           TableName table =HFileLink.getReferencedTableName(inputPath.getName());
           String region = HFileLink.getReferencedRegionName(inputPath.getName());
           String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
-          path = new Path(FSUtils.getTableDir(new Path("./"), table),
-              new Path(region, new Path(family, hfile)));
+          // TODO: Currently assumes target cluster's layout is same as source cluster layout
+          // Add another config option?
+          Path tableDir = FSUtils.getTableDir(new Path("./"), table);
+          Path regionDir = FsLayout.getRegionDir(tableDir, region);
+          path = new Path(regionDir, new Path(family, hfile));
           break;
         case WAL:
           Path oldLogsDir = new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 441dbbf..e4b29ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -399,7 +400,7 @@ public class RestoreSnapshotHelper {
     Map<String, List<SnapshotRegionManifest.StoreFile>> snapshotFiles =
                 getRegionHFileReferences(regionManifest);
 
-    Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
+    Path regionDir = FsLayout.getRegionDir(tableDir, regionInfo);
     String tableName = tableDesc.getTableName().getNameAsString();
 
     // Restore families present in the table
@@ -531,7 +532,7 @@ public class RestoreSnapshotHelper {
    */
   private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo,
       final SnapshotRegionManifest manifest) throws IOException {
-    final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
+    final Path regionDir = FsLayout.getRegionDir(tableDir, region.getRegionInfo());
     final String tableName = tableDesc.getTableName().getNameAsString();
     for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) {
       Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8());
@@ -588,12 +589,12 @@ public class RestoreSnapshotHelper {
       final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
     String hfileName = storeFile.getName();
 
-    // Extract the referred information (hfile name and parent region)
-    Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(
-        snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()),
-        hfileName));
-    String snapshotRegionName = refPath.getParent().getParent().getName();
-    String fileName = refPath.getName();
+    Path referenceFile = new Path(new Path(FsLayout.getRegionDir(new Path(
+      snapshotTable.getNameAsString()), regionInfo), familyDir.getName()), hfileName);
+    Path referredToFile = StoreFileInfo.getReferredToFile(referenceFile);
+    
+    String snapshotRegionName = referredToFile.getParent().getParent().getName();
+    String fileName = referredToFile.getName();
 
     // The new reference should have the cloned region name as parent, if it is a clone.
     String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
@@ -619,8 +620,7 @@ public class RestoreSnapshotHelper {
       if (linkPath != null) {
         in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs);
       } else {
-        linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(),
-                        regionInfo.getEncodedName()), familyDir.getName()), hfileName);
+        linkPath = FsLayout.makeHFileLinkPath(snapshotManifest, regionInfo, familyDir.getName(), hfileName);
         in = fs.open(linkPath);
       }
       OutputStream out = fs.create(outPath);
@@ -651,8 +651,9 @@ public class RestoreSnapshotHelper {
    */
   public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
     HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(),
-                      snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
-                      snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
+        snapshotRegionInfo.getStartKey(),
+        snapshotRegionInfo.getEndKey(), snapshotRegionInfo.isSplit(),
+        snapshotRegionInfo.getRegionId());
     regionInfo.setOffline(snapshotRegionInfo.isOffline());
     return regionInfo;
   }
@@ -662,7 +663,7 @@ public class RestoreSnapshotHelper {
    */
   private List<HRegionInfo> getTableRegions() throws IOException {
     LOG.debug("get table regions: " + tableDir);
-    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regionDirs == null) return null;
 
     List<HRegionInfo> regions = new LinkedList<HRegionInfo>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index 137acf3..ad7c93a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ExecutorCompletionService;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -114,7 +115,7 @@ public class SnapshotManifestV1 {
   static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
       final Executor executor,final FileSystem fs, final Path snapshotDir,
       final SnapshotDescription desc) throws IOException {
-    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
+    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
     if (regions == null) {
       LOG.info("No regions under directory:" + snapshotDir);
       return null;
@@ -133,9 +134,9 @@ public class SnapshotManifestV1 {
     }
 
     ArrayList<SnapshotRegionManifest> regionsManifest =
-        new ArrayList<SnapshotRegionManifest>(regions.length);
+        new ArrayList<SnapshotRegionManifest>(regions.size());
     try {
-      for (int i = 0; i < regions.length; ++i) {
+      for (int i = 0; i < regions.size(); ++i) {
         regionsManifest.add(completionService.take().get());
       }
     } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
index dccbeb5..b0519fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
@@ -118,6 +118,8 @@ public class SnapshotManifestV2 {
   static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
       final Executor executor,final FileSystem fs, final Path snapshotDir,
       final SnapshotDescription desc) throws IOException {
+    // TODO: Verify nothing is needed here. SnapshotManifestV1 needed a
+    // layout-aware path change, but V2 manifests don't appear to.
     FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
       @Override
       public boolean accept(Path path) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 6d10351..ce51e27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -64,13 +64,14 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -261,21 +262,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Delete the region directory if exists.
-   * @param conf
-   * @param hri
-   * @return True if deleted the region directory.
-   * @throws IOException
-   */
-  public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
-  throws IOException {
-    Path rootDir = getRootDir(conf);
-    FileSystem fs = rootDir.getFileSystem(conf);
-    return deleteDirectory(fs,
-      new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
-  }
-
-  /**
    * Return the number of bytes that large input files should be optimally
    * be split into to minimize i/o time.
    *
@@ -660,7 +646,8 @@ public abstract class FSUtils {
   throws IOException, DeserializationException {
     String version = getVersion(fs, rootdir);
     if (version == null) {
-      if (!metaRegionExists(fs, rootdir)) {
+      // TODO: Doesn't feel like a utility should have a dependency like this
+      if (!MasterFileSystem.metaRegionExists(fs, rootdir)) {
         // rootDir is empty (no version file and no root region)
         // just create new version file (HBASE-1195)
         setVersion(fs, rootdir, wait, retries);
@@ -1017,22 +1004,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Checks if meta region exists
-   *
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
-   * @return true if exists
-   * @throws IOException e
-   */
-  @SuppressWarnings("deprecation")
-  public static boolean metaRegionExists(FileSystem fs, Path rootdir)
-  throws IOException {
-    Path metaRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
-    return fs.exists(metaRegionDir);
-  }
-
-  /**
    * Compute HDFS blocks distribution of a given file, or a portion of the file
    * @param fs file system
    * @param status file status of the file
@@ -1070,10 +1041,10 @@ public abstract class FSUtils {
       final Path hbaseRootDir)
   throws IOException {
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    PathFilter regionFilter = new RegionDirFilter(fs);
+    RegionDirFilter regionFilter = new RegionDirFilter(fs);
     PathFilter familyFilter = new FamilyDirFilter(fs);
-    for (Path d : tableDirs) {
-      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
+    for (Path tableDir : tableDirs) {
+      List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
         // Else its a region name.  Now look in region for families.
@@ -1143,13 +1114,13 @@ public abstract class FSUtils {
     Map<String, Integer> frags = new HashMap<String, Integer>();
     int cfCountTotal = 0;
     int cfFragTotal = 0;
-    PathFilter regionFilter = new RegionDirFilter(fs);
+    RegionDirFilter regionFilter = new RegionDirFilter(fs);
     PathFilter familyFilter = new FamilyDirFilter(fs);
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    for (Path d : tableDirs) {
+    for (Path tableDir : tableDirs) {
       int cfCount = 0;
       int cfFrag = 0;
-      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
+      List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
         // else its a region name, now look in region for families
@@ -1167,7 +1138,7 @@ public abstract class FSUtils {
         }
       }
       // compute percentage per table and store in result list
-      frags.put(FSUtils.getTableName(d).getNameAsString(),
+      frags.put(FSUtils.getTableName(tableDir).getNameAsString(),
         cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
     }
     // set overall percentage for all tables
@@ -1433,25 +1404,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
-   * .tableinfo
-   * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
-   * @return List of paths to valid region directories in table dir.
-   * @throws IOException
-   */
-  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
-    // assumes we are in a table dir.
-    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
-    List<Path> regionDirs = new ArrayList<Path>(rds.length);
-    for (FileStatus rdfs: rds) {
-      Path rdPath = rdfs.getPath();
-      regionDirs.add(rdPath);
-    }
-    return regionDirs;
-  }
-
-  /**
    * Filter for all dirs that are legal column family names.  This is generally used for colfam
    * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
    */
@@ -1616,12 +1568,11 @@ public abstract class FSUtils {
     // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
     // should be regions.
     PathFilter familyFilter = new FamilyDirFilter(fs);
-    FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
-    for (FileStatus regionDir : regionDirs) {
+    List<Path> regionDirs = FsLayout.getRegionDirPaths(fs, tableDir);
+    for (Path dd : regionDirs) {
       if (null != errors) {
         errors.progress();
       }
-      Path dd = regionDir.getPath();
       // else its a region name, now look in region for families
       FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
       for (FileStatus familyDir : familyDirs) {

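The deleted getRegionDirs() helper is superseded by the layout-aware calls used
throughout this patch; a minimal usage sketch of both forms:

    // Enumerate region dirs without assuming they sit directly under tableDir.
    List<Path> regionDirs = FsLayout.getRegionDirPaths(fs, tableDir);
    List<FileStatus> regionStats =
        FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
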
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
index f0cc0c1..efd8124 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
@@ -19,11 +19,13 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.NavigableSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -71,7 +73,7 @@ public final class FSVisitor {
    */
   public static void visitRegions(final FileSystem fs, final Path tableDir,
       final RegionVisitor visitor) throws IOException {
-    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regions == null) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("No regions under directory:" + tableDir);
@@ -94,7 +96,7 @@ public final class FSVisitor {
    */
   public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir,
       final StoreFileVisitor visitor) throws IOException {
-    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regions == null) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("No regions under directory:" + tableDir);
@@ -156,7 +158,7 @@ public final class FSVisitor {
    */
   public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir,
       final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
-    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regions == null) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("No recoveredEdits regions under directory:" + tableDir);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index cc87f64..98ad22b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -62,6 +62,7 @@ import com.google.common.collect.Multimap;
 import com.google.common.collect.Ordering;
 import com.google.common.collect.TreeMultimap;
 import com.google.protobuf.ServiceException;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.lang.StringUtils;
@@ -109,6 +110,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -754,13 +756,14 @@ public class HBaseFsck extends Configured implements Closeable {
       final RegionBoundariesInformation currentRegionBoundariesInformation =
           new RegionBoundariesInformation();
       Path hbaseRoot = FSUtils.getRootDir(getConf());
+      FileSystem fs = hbaseRoot.getFileSystem(getConf());
       for (HRegionInfo regionInfo : regions) {
         Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
         currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
         // For each region, get the start and stop key from the META and compare them to the
         // same information from the Stores.
-        Path path = new Path(tableDir, regionInfo.getEncodedName());
-        FileSystem fs = path.getFileSystem(getConf());
+        HRegionFileSystem hrfs = HRegionFileSystem.create(getConf(), fs, tableDir, regionInfo);
+        Path path = hrfs.getRegionDir();
         FileStatus[] files = fs.listStatus(path);
         // For all the column families in this region...
         byte[] storeFirstKey = null;
@@ -2276,8 +2279,8 @@ public class HBaseFsck extends Configured implements Closeable {
                   LOG.warn(hri + " start and stop keys are in the range of " + region
                       + ". The region might not be cleaned up from hdfs when region " + region
                       + " split failed. Hence deleting from hdfs.");
-                  HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs,
-                    regionDir.getParent(), hri);
+                  HRegionFileSystem.deleteAndArchiveRegionFromFileSystem(getConf(), fs,
+                    FsLayout.getTableDirFromRegionDir(regionDir), hri);
                   return;
                 }
               }
@@ -2804,8 +2807,8 @@ public class HBaseFsck extends Configured implements Closeable {
                 + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
         HTableDescriptor htd = getTableInfo().getHTD();
         // from curEndKey to EMPTY_START_ROW
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
-            HConstants.EMPTY_START_ROW);
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
+            curEndKey, HConstants.EMPTY_START_ROW);
 
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Table region end key was not empty.  Created new empty region: " + newRegion
@@ -2915,8 +2918,8 @@ public class HBaseFsck extends Configured implements Closeable {
         // create new empty container region.
         HTableDescriptor htd = getTableInfo().getHTD();
         // from start key to end Key
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
-            range.getSecond());
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
+            range.getFirst(), range.getSecond());
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("[" + thread + "] Created new empty container region: " +
             newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
@@ -3601,7 +3604,7 @@ public class HBaseFsck extends Configured implements Closeable {
       } else if (this.hdfsEntry != null) {
         // we are only guaranteed to have a path and not an HRI for hdfsEntry,
         // so we get the name from the Path
-        Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent();
+        Path tableDir = FsLayout.getTableDirFromRegionDir(this.hdfsEntry.hdfsRegionDir);
         return FSUtils.getTableName(tableDir);
       } else {
         // return the info from the first online/deployed hri
@@ -4038,7 +4041,11 @@ public class HBaseFsck extends Configured implements Closeable {
     public synchronized Void call() throws IOException {
       try {
         // level 2: <HBASE_DIR>/<table>/*
-        FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
+        List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir.getPath(), 
+          new FSUtils.RegionDirFilter(fs));
+        if (regionDirs == null) {
+          return null;
+        }
         for (FileStatus regionDir : regionDirs) {
           errors.progress();
           String encodedName = regionDir.getPath().getName();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 937e9b2..1df19bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 
 /**
@@ -72,39 +71,6 @@ public class HFileArchiveUtil {
   }
 
   /**
-   * Get the archive directory for a given region under the specified table
-   * @param tableName the table name. Cannot be null.
-   * @param regiondir the path to the region directory. Cannot be null.
-   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
-   *         should not be archived
-   */
-  public static Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName,
-                                         Path regiondir) {
-    // get the archive directory for a table
-    Path archiveDir = getTableArchivePath(rootDir, tableName);
-
-    // then add on the region path under the archive
-    String encodedRegionName = regiondir.getName();
-    return HRegion.getRegionDir(archiveDir, encodedRegionName);
-  }
-
-  /**
-   * Get the archive directory for a given region under the specified table
-   * @param rootDir {@link Path} to the root directory where hbase files are stored (for building
-   *          the archive path)
-   * @param tableName name of the table to archive. Cannot be null.
-   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
-   *         should not be archived
-   */
-  public static Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName, String encodedRegionName) {
-    // get the archive directory for a table
-    Path archiveDir = getTableArchivePath(rootDir, tableName);
-    return HRegion.getRegionDir(archiveDir, encodedRegionName);
-  }
-
-  /**
    * Get the path to the table archive directory based on the configured archive directory.
    * <p>
    * Get the path to the table's archive directory.

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 29ab24e..303ed60 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -217,8 +218,9 @@ public class HFileCorruptionChecker {
    * @throws IOException
    */
   void checkTableDir(Path tableDir) throws IOException {
-    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
-    if (rds.length == 0 && !fs.exists(tableDir)) {
+    // TODO: Maybe a TableFileSystem??
+    List<Path> rds = FsLayout.getRegionDirPaths(fs, tableDir);
+    if (rds.size() == 0 && !fs.exists(tableDir)) {
       // interestingly listStatus does not throw an exception if the path does not exist.
       LOG.warn("Table Directory " + tableDir +
           " does not exist.  Likely due to concurrent delete. Skipping.");
@@ -230,8 +232,7 @@ public class HFileCorruptionChecker {
     List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>();
     List<Future<Void>> rdFutures;
 
-    for (FileStatus rdFs : rds) {
-      Path rdDir = rdFs.getPath();
+    for (Path rdDir : rds) {
       RegionDirChecker work = new RegionDirChecker(rdDir);
       rdcs.add(work);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 9a26a24..9d92fd3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -512,21 +513,19 @@ public class WALSplitter {
    * @return Path to file into which to dump split log edits.
    * @throws IOException
    */
-  @SuppressWarnings("deprecation")
   static Path getRegionSplitEditsPath(final FileSystem fs,
       final Entry logEntry, final Path rootDir, boolean isCreate)
   throws IOException {
     Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTablename());
     String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName());
-    Path regiondir = HRegion.getRegionDir(tableDir, encodedRegionName);
-    Path dir = getRegionDirRecoveredEditsDir(regiondir);
-
+    Path regiondir = FsLayout.getRegionDir(tableDir, encodedRegionName);
     if (!fs.exists(regiondir)) {
-      LOG.info("This region's directory doesn't exist: "
-          + regiondir.toString() + ". It is very likely that it was" +
-          " already split so it's safe to discard those edits.");
-      return null;
+      LOG.info("This region's directory doesn't exist: "
+          + regiondir + ". It is very likely that it was"
+          + " already split so it's safe to discard those edits.");
+      return null;
     }
+    Path dir = getRegionDirRecoveredEditsDir(regiondir);
     if (fs.exists(dir) && fs.isFile(dir)) {
       Path tmp = new Path("/tmp");
       if (!fs.exists(tmp)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index e8b79a8..0cbd2f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1316,7 +1316,25 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
 
     return createTable(tableName, new byte[][] { family }, splitKeys);
   }
+  
+  /**
+   * Create a table with multiple regions.
+   * @param desc table descriptor
+   * @param family column family name
+   * @param numRegions number of regions to create; must be at least 3
+   * @return An HTable instance for the created table.
+   * @throws IOException
+   */
+  public HTable createMultiRegionTable(HTableDescriptor desc, byte[] family, int numRegions)
+      throws IOException {
+    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
+    byte[] startKey = Bytes.toBytes("aaaaa");
+    byte[] endKey = Bytes.toBytes("zzzzz");
+    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
 
+    return createTable(desc, new byte[][] { family }, splitKeys, 
+      new Configuration(getConfiguration()));
+  }
 
   /**
    * Create a table.

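A usage sketch of the new helper (table and family names are made up):

    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t"));
    HTable table = TEST_UTIL.createMultiRegionTable(desc, Bytes.toBytes("f"), 10);
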
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java
new file mode 100644
index 0000000..2c0fd6e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java
@@ -0,0 +1,184 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HierarchicalHRegionFileSystem;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class, MiscTests.class})
+public class TestHumongousTable {
+  protected static final Log LOG = LogFactory.getLog(TestHumongousTable.class);
+  protected final static int NUM_SLAVES_BASE = 4;
+  private static HBaseTestingUtility TEST_UTIL;
+  private static Configuration CONF;
+  protected static HBaseAdmin ADMIN;
+  protected static FileSystem FS;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+    TEST_UTIL = new HBaseTestingUtility();
+    CONF = TEST_UTIL.getConfiguration();
+    TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE);
+    ADMIN = TEST_UTIL.getHBaseAdmin();
+    LOG.info("Done initializing cluster");
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    try {
+      TEST_UTIL.shutdownMiniCluster();
+    } finally {
+      FsLayout.reset();
+    }
+  }
+
+  @Before
+  public void beforeMethod() throws IOException {
+    for (HTableDescriptor desc : ADMIN.listTables(".*")) {
+      ADMIN.disableTable(desc.getTableName());
+      ADMIN.deleteTable(desc.getTableName());
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testCreateHumongousTable() throws IOException, InterruptedException {
+    // create a humongous table with splits
+    String tableNameStr = "testCreateHumongousTable";
+    TableName tableName = TableName.valueOf(tableNameStr);
+    String familyName = "col";
+    TableName testTable = tableName; // same table; alias used below
+    HTableDescriptor desc = new HTableDescriptor(testTable);
+    HColumnDescriptor family = new HColumnDescriptor(familyName);
+    desc.addFamily(family);
+    ADMIN.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
+
+    // check that the table dir was created on the fs
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), testTable);
+    FS = TEST_UTIL.getTestFileSystem();
+    assertTrue(FS.exists(tableDir));
+
+    // load table with rows and flush stores
+    Connection connection = TEST_UTIL.getConnection();
+    Table table = connection.getTable(testTable);
+    int rowCount = TEST_UTIL.loadTable(table, Bytes.toBytes(familyName));
+    ADMIN.flush(tableName);
+    assertEquals(rowCount, TEST_UTIL.countRows(table));
+    
+    verifyColumnFamilies(testTable, familyName);
+    
+    // test alteration of humongous table too
+    String familyName2 = "col2";
+    HColumnDescriptor family2 = new HColumnDescriptor(familyName2);
+    ADMIN.addColumnFamily(tableName, family2);
+    
+    // Wait for async add column to finish
+    Thread.sleep(5000);
+    
+    TEST_UTIL.loadTable(table, Bytes.toBytes(familyName2));
+    ADMIN.flush(tableName);
+    
+    verifyColumnFamilies(testTable, familyName, familyName2);
+    
+    // drop the test table
+    ADMIN.disableTable(testTable);
+    assertTrue(ADMIN.isTableDisabled(testTable));
+    ADMIN.deleteTable(testTable);
+    assertEquals(null, ADMIN.getTableRegions(testTable));
+    assertFalse(FS.exists(tableDir));
+  }
+  
+  private void verifyColumnFamilies(TableName testTable, String... colFamNames) throws IOException {
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), testTable);
+    
+    List<HRegionInfo> tableRegions = ADMIN.getTableRegions(testTable);
+    
+    // check region dirs and files on fs
+    for (HRegionInfo hri : tableRegions) {
+      // check region dir structure
+      HierarchicalHRegionFileSystem hrfs = (HierarchicalHRegionFileSystem) HRegionFileSystem.openRegionFromFileSystem(
+        CONF, FS, tableDir, hri, true);
+      
+      Path humongousRegionDir = hrfs.getRegionDir();
+      Path normalRegionDir = hrfs.getStandardHBaseRegionDir();
+      
+      assertTrue(FS.exists(humongousRegionDir));
+      assertFalse(FS.exists(normalRegionDir));
+      
+      String bucket = hri.getEncodedName().substring(
+          HRegionInfo.MD5_HEX_LENGTH
+              - HRegionFileSystem.HUMONGOUS_DIR_NAME_SIZE);
+      assertEquals(humongousRegionDir.getParent().getName(), bucket);
+      
+      FileStatus[] statList = FS.listStatus(humongousRegionDir);
+      Set<String> contents = new HashSet<String>();
+      
+      for (FileStatus stat : statList) {
+        contents.add(stat.getPath().getName());
+      }
+      
+      LOG.debug("Contents of humongous region dir: " + contents);
+      
+      assertTrue(contents.contains(HRegionFileSystem.REGION_INFO_FILE));
+      assertTrue(contents.contains(HConstants.HBASE_TEMP_DIRECTORY));
+      assertTrue(contents.contains(HConstants.RECOVERED_EDITS_DIR));
+      
+      for (String colFam : colFamNames) {
+        assertTrue(contents.contains(colFam));
+
+        // familyDir has one store file
+        Path famPath = new Path(humongousRegionDir, colFam);
+        assertEquals(1, FS.listStatus(famPath).length);
+      }
+      
+      assertEquals("Contents: " + contents + " and fam names: " + Arrays.toString(colFamNames), 
+        3 + colFamNames.length, contents.size());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e30d719..2021ebb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -35,13 +35,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -66,8 +70,9 @@ import org.junit.experimental.categories.Category;
 public class TestHFileArchiving {
 
   private static final Log LOG = LogFactory.getLog(TestHFileArchiving.class);
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  static final byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
 
   /**
    * Setup the config for the cluster
@@ -117,7 +122,7 @@ public class TestHFileArchiving {
   public void testRemovesRegionDirOnArchive() throws Exception {
     TableName TABLE_NAME =
         TableName.valueOf("testRemovesRegionDirOnArchive");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     final Admin admin = UTIL.getHBaseAdmin();
 
@@ -136,8 +141,7 @@ public class TestHFileArchiving {
     FileSystem fs = UTIL.getTestFileSystem();
 
     // now attempt to depose the region
-    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
 
     HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
@@ -177,7 +181,7 @@ public class TestHFileArchiving {
   public void testDeleteRegionWithNoStoreFiles() throws Exception {
     TableName TABLE_NAME =
         TableName.valueOf("testDeleteRegionWithNoStoreFiles");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     // get the current store files for the region
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
@@ -188,8 +192,7 @@ public class TestHFileArchiving {
     FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
     // make sure there are some files in the regiondir
-    Path rootDir = FSUtils.getRootDir(fs.getConf());
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
     FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
     Assert.assertNotNull("No files in the region directory", regionFiles);
     if (LOG.isDebugEnabled()) {
@@ -226,7 +229,7 @@ public class TestHFileArchiving {
   public void testArchiveOnTableDelete() throws Exception {
     TableName TABLE_NAME =
         TableName.valueOf("testArchiveOnTableDelete");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
@@ -306,7 +309,7 @@ public class TestHFileArchiving {
   public void testArchiveOnTableFamilyDelete() throws Exception {
     TableName TABLE_NAME =
         TableName.valueOf("testArchiveOnTableFamilyDelete");
-    UTIL.createTable(TABLE_NAME, new byte[][] {TEST_FAM, Bytes.toBytes("fam2")});
+    createTwoFamilyTable(TABLE_NAME);
 
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
@@ -360,11 +363,16 @@ public class TestHFileArchiving {
     Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
     FileSystem fs = UTIL.getTestFileSystem();
 
+    TableName tableName = TableName.valueOf("table");
+
     Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
-    Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
-        TableName.valueOf("table")), "abcdef");
+    Path tableDir = FSUtils.getTableDir(new Path("./"), tableName);
+    HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(
+      tableName, "abcdefabcdefabcdefabcdefabcdef12");
+    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
+    Path regionDir = hrfs.getRegionDir();
     Path familyDir = new Path(regionDir, "cf");
 
+    Path absoluteTableDir = new Path(rootDir, tableDir);
     Path sourceRegionDir = new Path(rootDir, regionDir);
     fs.mkdirs(sourceRegionDir);
 
@@ -387,7 +395,7 @@ public class TestHFileArchiving {
         try {
           // Try to archive the file
           HFileArchiver.archiveRegion(fs, rootDir,
-              sourceRegionDir.getParent(), sourceRegionDir);
+            absoluteTableDir, sourceRegionDir);
 
           // The archiver succeeded, the file is no longer in the original location
           // but it's in the archive location.
@@ -451,4 +459,21 @@ public class TestHFileArchiving {
     }
     return fileNames;
   }
+  
+  void createTable(TableName tn) throws Exception {
+    HTableDescriptor desc = makeDescriptor(tn);
+    desc.addFamily(new HColumnDescriptor(TEST_FAM));
+    UTIL.createTable(desc, null);  
+  }
+  
+  void createTwoFamilyTable(TableName tn) throws Exception {
+    HTableDescriptor desc = makeDescriptor(tn);
+    desc.addFamily(new HColumnDescriptor(TEST_FAM));
+    desc.addFamily(new HColumnDescriptor(TEST_FAM_2));
+    UTIL.createTable(desc, null);  
+  }
+  
+  HTableDescriptor makeDescriptor(TableName tn) {
+    return new HTableDescriptor(tn);
+  }
 }

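Table creation in these archiving tests now funnels through the overridable
makeDescriptor() hook, so a subclass can reuse every test case while customizing
only the table descriptor. A minimal sketch of that pattern (the subclass name
and the property tweak are hypothetical, not part of this patch):

    public class TestTweakedDescriptorArchiving extends TestHFileArchiving {
      @Override
      HTableDescriptor makeDescriptor(TableName tn) {
        HTableDescriptor desc = super.makeDescriptor(tn);
        // hypothetical: adjust descriptor settings before families are added
        return desc;
      }
    }
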
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java
new file mode 100644
index 0000000..e346808
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class, MiscTests.class})
+public class TestHumongousHFileArchiving extends TestHFileArchiving {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+  
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 821d5c2..67c0d50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -23,11 +23,15 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -35,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
@@ -286,13 +291,13 @@ public class TestRestoreSnapshotFromClient {
   }
 
   private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
+    Connection connection = TEST_UTIL.getConnection();
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    List<HRegionFileSystem> regions = mfs.getRegionFileSystems(TEST_UTIL.getConfiguration(), 
+      connection, tableName);
     Set<String> families = new HashSet<String>();
-    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
-      for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
-        families.add(familyDir.getName());
-      }
+    for (HRegionFileSystem hrfs : regions) {
+      families.addAll(hrfs.getFamilies());
     }
     return families;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java
new file mode 100644
index 0000000..ee217bf
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class, MiscTests.class})
+public class TestFsLayout {
+  private static Configuration conf;
+  private static HBaseTestingUtility TEST_UTIL;
+  
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf = HBaseConfiguration.create();
+    TEST_UTIL = new HBaseTestingUtility(conf);
+    TEST_UTIL.startMiniCluster();
+  }
+  
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    try {
+      TEST_UTIL.shutdownMiniCluster();
+    } finally {
+      FsLayout.reset();
+    }
+  }
+  
+  @Test
+  public void testLayoutInitialization() throws Exception {
+    FsLayout.reset();
+    assertNull(FsLayout.getRaw());
+    
+    HierarchicalFsLayout hierarchicalLayout = HierarchicalFsLayout.get();
+    FsLayout.setLayoutForTesting(hierarchicalLayout);
+    
+    assertTrue(FsLayout.getRaw() instanceof HierarchicalFsLayout);
+    assertTrue(FsLayout.get() instanceof HierarchicalFsLayout);
+    
+    FsLayout.reset();
+    assertNull(FsLayout.getRaw());
+    
+    DistributedFileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
+    Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
+    
+    FsLayout.writeLayoutFile(fs, rootDir, hierarchicalLayout, true);
+    assertNull(FsLayout.getRaw());
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT, true);
+    FsLayout.initialize(conf);
+    assertNotNull(FsLayout.getRaw());
+    assertTrue(FsLayout.get() instanceof HierarchicalFsLayout);
+    assertTrue(FsLayout.getRaw() instanceof HierarchicalFsLayout);
+    
+    FsLayout.reset();
+    FsLayout.deleteLayoutFile(fs, rootDir);
+    FsLayout.initialize(conf);
+    assertTrue(FsLayout.get() instanceof StandardHBaseFsLayout);
+    assertTrue(FsLayout.getRaw() instanceof StandardHBaseFsLayout);
+    
+    FsLayout.reset();
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT_STRICT, true);
+    try {
+      FsLayout.initialize(conf);
+      fail("expected IllegalStateException from initialize in strict layout detection mode");
+    } catch (IllegalStateException e) {
+      // Should be thrown by initialize
+    }
+    
+    FsLayout.reset();
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT, false);
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT_STRICT, false);
+    FsLayout.writeLayoutFile(fs, rootDir, StandardHBaseFsLayout.get(), true);
+    conf.set(FsLayout.FS_LAYOUT_CHOICE, HierarchicalFsLayout.class.getName());
+    assertTrue(FsLayout.get() instanceof StandardHBaseFsLayout);
+  }
+}

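The test above pins down the initialization contract: setLayoutForTesting()
forces a layout and reset() clears it; initialize() reads the layout file only
when FS_LAYOUT_DETECT is set, and throws IllegalStateException in strict mode
when the file is missing; FS_LAYOUT_CHOICE appears to take effect only through
an explicit initialize(conf), not a bare get(). A minimal test-scoped sketch,
assuming the static helpers behave as exercised here:

    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
    try {
      // exercise code that resolves region directories via FsLayout.get()
    } finally {
      FsLayout.reset(); // drop back to lazy, config-driven initialization
    }
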
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
index f2b26c1..6e0f151 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.hbase.io;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.IOException;
 import java.util.regex.Matcher;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -83,20 +86,25 @@ public class TestHFileLink {
   }
 
   @Test
-  public void testBackReference() {
+  public void testBackReference() throws IOException {
     Path rootDir = new Path("/root");
     Path archiveDir = new Path(rootDir, ".archive");
     String storeFileName = "121212";
     String linkDir = FileLink.BACK_REFERENCES_DIRECTORY_PREFIX + storeFileName;
-    String encodedRegion = "FEFE";
+    String encodedRegion = "abcabcabcabcabcabcabcabcabcabcab";
     String cf = "cf1";
 
     TableName refTables[] = {TableName.valueOf("refTable"),
         TableName.valueOf("ns", "refTable")};
+    
+    Configuration conf = HBaseConfiguration.create();
 
     for(TableName refTable : refTables) {
       Path refTableDir = FSUtils.getTableDir(archiveDir, refTable);
-      Path refRegionDir = HRegion.getRegionDir(refTableDir, encodedRegion);
+      HRegionInfo refHri = HRegionInfo.makeTestInfoWithEncodedName(refTable, encodedRegion);
+      HRegionFileSystem refHrfs = HRegionFileSystem.create(conf, 
+        rootDir.getFileSystem(conf), refTableDir, refHri);
+      Path refRegionDir = refHrfs.getRegionDir();
       Path refDir = new Path(refRegionDir, cf);
       Path refLinkDir = new Path(refDir, linkDir);
       String refStoreFileName = refTable.getNameAsString().replace(
@@ -107,7 +115,10 @@ public class TestHFileLink {
 
       for( TableName tableName : tableNames) {
         Path tableDir = FSUtils.getTableDir(rootDir, tableName);
-        Path regionDir = HRegion.getRegionDir(tableDir, encodedRegion);
+        HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(tableName, encodedRegion);
+        HRegionFileSystem hrfs = HRegionFileSystem.create(conf, 
+          rootDir.getFileSystem(conf), tableDir, hri);
+        Path regionDir = hrfs.getRegionDir();
         Path cfDir = new Path(regionDir, cf);
 
         //Verify back reference creation

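The recurring migration in these tests swaps the static, layout-coupled
HRegion.getRegionDir() for an HRegionFileSystem that asks the active layout for
the directory. In outline, using the factory and the test-only HRegionInfo
helper introduced by this patch:

    // before: hard-codes the flat <tableDir>/<encodedName> layout
    Path regionDir = HRegion.getRegionDir(tableDir, encodedName);

    // after: resolves the directory through the configured FsLayout
    HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(tableName, encodedName);
    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
    Path regionDir = hrfs.getRegionDir();
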
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
new file mode 100644
index 0000000..07a289b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({IOTests.class, SmallTests.class})
+public class TestHumongousHFileLink extends TestHFileLink {
+  
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+  
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 6b68bfe..cbf8695 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -61,8 +61,10 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -581,7 +583,10 @@ public class TestAssignmentManagerOnCluster {
 
       FileSystem fs = FileSystem.get(conf);
       Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
-      Path regionDir = new Path(tableDir, hri.getEncodedName());
+      
+      HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
+      Path regionDir = hrfs.getRegionDir();
+      
       // create a file named the same as the region dir to
       // mess up region opening
       fs.create(regionDir, true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 813eb49..7f32c04 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.NonceGenerator;
 import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
@@ -88,7 +88,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -120,7 +120,7 @@ import org.junit.experimental.categories.Category;
 @Category({MasterTests.class, LargeTests.class})
 @SuppressWarnings("deprecation")
 public class TestDistributedLogSplitting {
-  private static final Log LOG = LogFactory.getLog(TestSplitLogManager.class);
+  static final Log LOG = LogFactory.getLog(TestDistributedLogSplitting.class);
   static {
     // Uncomment the following line if more verbosity is needed for
     // debugging (see HBASE-12285 for details).
@@ -224,7 +224,6 @@ public class TestDistributedLogSplitting {
 
     installTable(new ZooKeeperWatcher(conf, "table-creation", null),
         "table", "family", 40);
-    TableName table = TableName.valueOf("table");
     List<HRegionInfo> regions = null;
     HRegionServer hrs = null;
     for (int i = 0; i < NUM_RS; i++) {
@@ -258,10 +257,10 @@ public class TestDistributedLogSplitting {
 
     int count = 0;
     for (HRegionInfo hri : regions) {
-
-      Path tdir = FSUtils.getTableDir(rootdir, table);
-      Path editsdir =
-        WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
+      Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable());
+      HRegionFileSystem hrfs = HRegionFileSystem.create(
+        conf, fs, tableDir, hri);
+      Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir());
       LOG.debug("checking edits dir " + editsdir);
       FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
         @Override
@@ -849,10 +848,11 @@ public class TestDistributedLogSplitting {
     int count = 0;
     FileSystem fs = master.getMasterFileSystem().getFileSystem();
     Path rootdir = FSUtils.getRootDir(conf);
-    Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf("disableTable"));
     for (HRegionInfo hri : regions) {
-      Path editsdir =
-        WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
+      Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable());
+      HRegionFileSystem hrfs = HRegionFileSystem.create(
+        conf, fs, tableDir, hri);
+      Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir());
       LOG.debug("checking edits dir " + editsdir);
       if(!fs.exists(editsdir)) continue;
       FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
@@ -880,8 +880,10 @@ public class TestDistributedLogSplitting {
 
     // clean up
     for (HRegionInfo hri : regions) {
-      Path editsdir =
-        WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
+      Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable());
+      HRegionFileSystem hrfs = HRegionFileSystem.create(
+        conf, fs, tableDir, hri);
+      Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir());      
       fs.delete(editsdir, true);
     }
     disablingHT.close();
@@ -1403,14 +1405,19 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, "table", "family", 10);
     FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    
     Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("table"));
-    List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
-    long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
-    WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0) , 1L, 1000L);
+    
+    List<HRegionInfo> tableRegions = MetaTableAccessor.getTableRegions(
+      TEST_UTIL.getConnection(), TableName.valueOf("table"));
+    HRegionInfo hri = tableRegions.get(0);
+    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
+    
+    long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 1L, 1000L);
+    WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 1L, 1000L);
     assertEquals(newSeqId + 2000,
-      WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 3L, 1000L));
+      WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 3L, 1000L));
     
-    Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regionDirs.get(0));
+    Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir());
     FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
       @Override
       public boolean accept(Path p) {
@@ -1421,7 +1428,7 @@ public class TestDistributedLogSplitting {
     assertEquals(1, files.length);
     
     // verify all seqId files aren't treated as recovered.edits files
-    NavigableSet<Path> recoveredEdits = WALSplitter.getSplitEditFilesSorted(fs, regionDirs.get(0));
+    NavigableSet<Path> recoveredEdits = WALSplitter.getSplitEditFilesSorted(fs, hrfs.getRegionDir());
     assertEquals(0, recoveredEdits.size());
     
     ht.close();
@@ -1437,7 +1444,8 @@ public class TestDistributedLogSplitting {
     TableName table = TableName.valueOf(tname);
     byte [] family = Bytes.toBytes(fname);
     LOG.info("Creating table with " + nrs + " regions");
-    Table ht = TEST_UTIL.createMultiRegionTable(table, family, nrs);
+    HTableDescriptor desc = new HTableDescriptor(table);
+    Table ht = TEST_UTIL.createMultiRegionTable(desc, family, nrs);
     int numRegions = -1;
     try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
       numRegions = r.getStartKeys().length;
@@ -1467,7 +1475,7 @@ public class TestDistributedLogSplitting {
     assertEquals(numRegions + 2 + existingRegions, regions.size());
     return ht;
   }
-
+  
   void populateDataInTable(int nrows, String fname) throws Exception {
     byte [] family = Bytes.toBytes(fname);
 
@@ -1614,7 +1622,7 @@ public class TestDistributedLogSplitting {
     return count;
   }
 
-  private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception {
+  void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception {
     TEST_UTIL.waitUntilNoRegionsInTransition(60000);
   }
 

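The same substitution runs through the log-splitting assertions: the
recovered.edits directory is now derived from the region's HRegionFileSystem,
so the checks hold under both the standard and hierarchical layouts. Condensed
from the hunks above:

    Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable());
    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
    Path editsDir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir());
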
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java
new file mode 100644
index 0000000..834d3d0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java
@@ -0,0 +1,41 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, LargeTests.class})
+public class TestHumongousTableDistributedLogSplitting extends TestDistributedLogSplitting {
+  
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+  
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/f2856e2c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 078aaa6..2423a5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -36,8 +36,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -173,11 +176,12 @@ public class TestHFileCleaner {
     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
     HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
-
+    
     // make all the directories for archiving files
     Path table = new Path(archivedHfileDir, "table");
-    Path region = new Path(table, "regionsomthing");
-    Path family = new Path(region, "fam");
+    Path family = HStore.getStoreHomedir(table, "regionsomething", Bytes.toBytes("fam"));
+    Path region = family.getParent();
+    
     Path file = new Path(family, "file12345");
     fs.mkdirs(family);
     if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);