Posted to commits@hbase.apache.org by mb...@apache.org on 2015/09/17 01:53:53 UTC

[1/3] hbase git commit: Incomplete experiment (it does not compile and has missing code)

Repository: hbase
Updated Branches:
  refs/heads/hbase-14439 f2856e2c5 -> e7743d779


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
index 630ca78..be820a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
@@ -25,12 +25,14 @@ import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 
 /**
  * Thread that walks over the filesystem, and computes the mappings
@@ -41,12 +43,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 class FSRegionScanner implements Runnable {
   static private final Log LOG = LogFactory.getLog(FSRegionScanner.class);
 
-  private Path regionPath;
-
-  /**
-   * The file system used
-   */
-  private FileSystem fs;
+  private HRegionFileSystem rfs;
 
   /**
    * Maps each region to the RS with highest locality for that region.
@@ -59,11 +56,10 @@ class FSRegionScanner implements Runnable {
    */
   private Map<String, Map<String, Float>> regionDegreeLocalityMapping;
 
-  FSRegionScanner(FileSystem fs, Path regionPath,
+  FSRegionScanner(Configuration conf, HRegionInfo hri,
                   Map<String, String> regionToBestLocalityRSMapping,
                   Map<String, Map<String, Float>> regionDegreeLocalityMapping) {
-    this.fs = fs;
-    this.regionPath = regionPath;
+    this.rfs = HRegionFileSystem.open(conf, hri);
     this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping;
     this.regionDegreeLocalityMapping = regionDegreeLocalityMapping;
   }
@@ -75,33 +71,13 @@ class FSRegionScanner implements Runnable {
       Map<String, AtomicInteger> blockCountMap = new HashMap<String, AtomicInteger>();
 
       //get table name
-      String tableName = regionPath.getParent().getName();
+      String tableName = rfs.getTable();
       int totalBlkCount = 0;
 
-      // ignore null
-      FileStatus[] cfList = fs.listStatus(regionPath, new FSUtils.FamilyDirFilter(fs));
-      if (null == cfList) {
-        return;
-      }
-
-      // for each cf, get all the blocks information
-      for (FileStatus cfStatus : cfList) {
-        if (!cfStatus.isDirectory()) {
-          // skip because this is not a CF directory
-          continue;
-        }
-
-        FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath());
-        if (null == storeFileLists) {
-          continue;
-        }
-
-        for (FileStatus storeFile : storeFileLists) {
-          BlockLocation[] blkLocations =
-            fs.getFileBlockLocations(storeFile, 0, storeFile.getLen());
-          if (null == blkLocations) {
-            continue;
-          }
+      for (String family: rfs.getFamilies()) {
+        for (StoreFileInfo storeFile: rfs.getStoreFiles(family)) {
+          BlockLocation[] blkLocations = storeFile.getFileBlockLocations(fs);
+          if (blkLocations == null) continue;
 
           totalBlkCount += blkLocations.length;
           for(BlockLocation blk: blkLocations) {
@@ -138,7 +114,7 @@ class FSRegionScanner implements Runnable {
         if (hostToRun.endsWith(".")) {
           hostToRun = hostToRun.substring(0, hostToRun.length()-1);
         }
-        String name = tableName + ":" + regionPath.getName();
+        String name = tableName + ":" + rfs.getRegionInfo().getEncodedName();
         synchronized (regionToBestLocalityRSMapping) {
           regionToBestLocalityRSMapping.put(name,  hostToRun);
         }
@@ -157,7 +133,7 @@ class FSRegionScanner implements Runnable {
         }
         // Put the locality map into the result map, keyed by the encoded name
         // of the region.
-        regionDegreeLocalityMapping.put(regionPath.getName(), hostLocalityMap);
+        regionDegreeLocalityMapping.put(rfs.getRegionInfo().getEncodedName(), hostLocalityMap);
       }
     } catch (IOException e) {
       LOG.warn("Problem scanning file system", e);

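The new loop body above is one of the places where this experiment does not compile yet: it still reads the old fs field that this same patch deletes, and StoreFileInfo is used without an import. A compile-clean sketch of the same walk, under the assumption that the underlying FileSystem can be obtained from the region abstraction (the getFileSystem() accessor below is hypothetical, not part of this patch):

    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

    // Walk families and store files through HRegionFileSystem instead of
    // listing raw column-family directories on the FileSystem.
    FileSystem fs = rfs.getFileSystem();  // hypothetical accessor, not in this patch
    for (String family : rfs.getFamilies()) {
      for (StoreFileInfo storeFile : rfs.getStoreFiles(family)) {
        BlockLocation[] blkLocations = storeFile.getFileBlockLocations(fs);
        if (blkLocations == null) continue;
        totalBlkCount += blkLocations.length;
        // ... accumulate per-host block counts into blockCountMap as before ...
      }
    }
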
http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index cce37d7..426b73e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -221,6 +221,7 @@ public class FSTableDescriptors implements TableDescriptors {
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
+      HMasterFileSystem mfs = HMasterFileSystem.open(conf, rootdir);
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
         TableDescriptor htd = null;
         try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index ce51e27..efa8752 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -31,6 +31,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -64,7 +65,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.HMasterFileSystem;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -1027,43 +1029,6 @@ public abstract class FSUtils {
   }
 
 
-
-  /**
-   * Runs through the hbase rootdir and checks all stores have only
-   * one file in them -- that is, they've been major compacted.  Looks
-   * at root and meta tables too.
-   * @param fs filesystem
-   * @param hbaseRootDir hbase root directory
-   * @return True if this hbase install is major compacted.
-   * @throws IOException e
-   */
-  public static boolean isMajorCompacted(final FileSystem fs,
-      final Path hbaseRootDir)
-  throws IOException {
-    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    RegionDirFilter regionFilter = new RegionDirFilter(fs);
-    PathFilter familyFilter = new FamilyDirFilter(fs);
-    for (Path tableDir : tableDirs) {
-      List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter);
-      for (FileStatus regionDir : regionDirs) {
-        Path dd = regionDir.getPath();
-        // Else its a region name.  Now look in region for families.
-        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
-        for (FileStatus familyDir : familyDirs) {
-          Path family = familyDir.getPath();
-          // Now in family make sure only one file.
-          FileStatus[] familyStatus = fs.listStatus(family);
-          if (familyStatus.length > 1) {
-            LOG.debug(family.toString() + " has " + familyStatus.length +
-                " files.");
-            return false;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
   // TODO move this method OUT of FSUtils. No dependencies to HMaster
   /**
    * Returns the total overall fragmentation percentage. Includes hbase:meta and
@@ -1109,38 +1074,32 @@ public abstract class FSUtils {
    * @throws IOException When scanning the directory fails.
    */
   public static Map<String, Integer> getTableFragmentation(
-    final FileSystem fs, final Path hbaseRootDir)
-  throws IOException {
-    Map<String, Integer> frags = new HashMap<String, Integer>();
-    int cfCountTotal = 0;
+      final FileSystem fs, final Path hbaseRootDir) throws IOException {
+    final Map<String, Integer> frags = new HashMap<String, Integer>();
+
     int cfFragTotal = 0;
-    RegionDirFilter regionFilter = new RegionDirFilter(fs);
-    PathFilter familyFilter = new FamilyDirFilter(fs);
-    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    for (Path tableDir : tableDirs) {
+    int cfCountTotal = 0;
+    HMasterFileSystem mfs = HMasterFileSystem.open(fs, hbaseRootDir);
+    for (TableName tableName: mfs.getTables()) {
       int cfCount = 0;
       int cfFrag = 0;
-      List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter);
-      for (FileStatus regionDir : regionDirs) {
-        Path dd = regionDir.getPath();
-        // else its a region name, now look in region for families
-        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
-        for (FileStatus familyDir : familyDirs) {
+      for (HRegionInfo hri: mfs.getRegions(tableName)) {
+        HRegionFileSystem rfs = HRegionFileSystem.open(conf, hri);
+        for (String family: rfs.getFamilies()) {
           cfCount++;
           cfCountTotal++;
-          Path family = familyDir.getPath();
-          // now in family make sure only one file
-          FileStatus[] familyStatus = fs.listStatus(family);
-          if (familyStatus.length > 1) {
+          Collection<StoreFileInfo> storeFiles = rfs.getStoreFiles(family);
+          if (storeFiles.size() > 1) {
             cfFrag++;
             cfFragTotal++;
           }
         }
       }
       // compute percentage per table and store in result list
-      frags.put(FSUtils.getTableName(tableDir).getNameAsString(),
+      frags.put(tableName.getNameAsString(),
         cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
     }
+
     // set overall percentage for all tables
     frags.put("-TOTAL-",
       cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
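
The rewritten getTableFragmentation(fs, hbaseRootDir) calls HRegionFileSystem.open(conf, hri) with no conf in scope, another of the advertised compile failures; note also that the HMasterFileSystem added by this patch only declares open(Configuration) and open(Configuration, Path), not open(FileSystem, Path). A sketch of one way to make this hunk compile, assuming the Configuration is simply threaded through as an extra parameter:

    public static Map<String, Integer> getTableFragmentation(
        final Configuration conf, final FileSystem fs, final Path hbaseRootDir)
        throws IOException {
      final Map<String, Integer> frags = new HashMap<String, Integer>();
      int cfFragTotal = 0;
      int cfCountTotal = 0;
      HMasterFileSystem mfs = HMasterFileSystem.open(conf, hbaseRootDir);
      for (TableName tableName : mfs.getTables()) {
        int cfCount = 0;
        int cfFrag = 0;
        for (HRegionInfo hri : mfs.getRegions(tableName)) {
          HRegionFileSystem rfs = HRegionFileSystem.open(conf, hri);
          for (String family : rfs.getFamilies()) {
            cfCount++;
            cfCountTotal++;
            // more than one store file in a family means it is not major compacted
            if (rfs.getStoreFiles(family).size() > 1) {
              cfFrag++;
              cfFragTotal++;
            }
          }
        }
        frags.put(tableName.getNameAsString(),
            cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100));
      }
      frags.put("-TOTAL-",
          cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100));
      return frags;
    }
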
@@ -1259,29 +1218,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * A {@link PathFilter} that returns usertable directories. To get all directories use the
-   * {@link BlackListDirFilter} with a <tt>null</tt> blacklist
-   */
-  public static class UserTableDirFilter extends BlackListDirFilter {
-    public UserTableDirFilter(FileSystem fs) {
-      super(fs, HConstants.HBASE_NON_TABLE_DIRS);
-    }
-
-    protected boolean isValidName(final String name) {
-      if (!super.isValidName(name))
-        return false;
-
-      try {
-        TableName.isLegalTableQualifierName(Bytes.toBytes(name));
-      } catch (IllegalArgumentException e) {
-        LOG.info("INVALID NAME " + name);
-        return false;
-      }
-      return true;
-    }
-  }
-
-  /**
    * Heuristic to determine whether is safe or not to open a file for append
    * Looks both for dfs.support.append and use reflection to search
    * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush()
@@ -1336,36 +1272,6 @@ public abstract class FSUtils {
   public abstract void recoverFileLease(final FileSystem fs, final Path p,
       Configuration conf, CancelableProgressable reporter) throws IOException;
 
-  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
-      throws IOException {
-    List<Path> tableDirs = new LinkedList<Path>();
-
-    for(FileStatus status :
-        fs.globStatus(new Path(rootdir,
-            new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
-      tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
-    }
-    return tableDirs;
-  }
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @return All the table directories under <code>rootdir</code>. Ignore non table hbase folders such as
-   * .logs, .oldlogs, .corrupt folders.
-   * @throws IOException
-   */
-  public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
-      throws IOException {
-    // presumes any directory under hbase.rootdir is a table
-    FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
-    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
-    for (FileStatus dir: dirs) {
-      tabledirs.add(dir.getPath());
-    }
-    return tabledirs;
-  }
-
   /**
    * Checks if the given path is the one with 'recovered.edits' dir.
    * @param path
@@ -1376,94 +1282,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Filter for all dirs that don't start with '.'
-   */
-  public static class RegionDirFilter implements PathFilter {
-    // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
-    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
-    final FileSystem fs;
-
-    public RegionDirFilter(FileSystem fs) {
-      this.fs = fs;
-    }
-
-    @Override
-    public boolean accept(Path rd) {
-      if (!regionDirPattern.matcher(rd.getName()).matches()) {
-        return false;
-      }
-
-      try {
-        return fs.getFileStatus(rd).isDirectory();
-      } catch (IOException ioe) {
-        // Maybe the file was moved or the fs was disconnected.
-        LOG.warn("Skipping file " + rd +" due to IOException", ioe);
-        return false;
-      }
-    }
-  }
-
-  /**
-   * Filter for all dirs that are legal column family names.  This is generally used for colfam
-   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
-   */
-  public static class FamilyDirFilter implements PathFilter {
-    final FileSystem fs;
-
-    public FamilyDirFilter(FileSystem fs) {
-      this.fs = fs;
-    }
-
-    @Override
-    public boolean accept(Path rd) {
-      try {
-        // throws IAE if invalid
-        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
-      } catch (IllegalArgumentException iae) {
-        // path name is an invalid family name and thus is excluded.
-        return false;
-      }
-
-      try {
-        return fs.getFileStatus(rd).isDirectory();
-      } catch (IOException ioe) {
-        // Maybe the file was moved or the fs was disconnected.
-        LOG.warn("Skipping file " + rd +" due to IOException", ioe);
-        return false;
-      }
-    }
-  }
-
-  /**
-   * Given a particular region dir, return all the familydirs inside it
-   *
-   * @param fs A file system for the Path
-   * @param regionDir Path to a specific region directory
-   * @return List of paths to valid family directories in region dir.
-   * @throws IOException
-   */
-  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
-    // assumes we are in a region dir.
-    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
-    List<Path> familyDirs = new ArrayList<Path>(fds.length);
-    for (FileStatus fdfs: fds) {
-      Path fdPath = fdfs.getPath();
-      familyDirs.add(fdPath);
-    }
-    return familyDirs;
-  }
-
-  public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
-    FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
-    List<Path> referenceFiles = new ArrayList<Path>(fds.length);
-    for (FileStatus fdfs: fds) {
-      Path fdPath = fdfs.getPath();
-      referenceFiles.add(fdPath);
-    }
-    return referenceFiles;
-  }
-
-  /**
    * Filter for HFiles that excludes reference files.
    */
   public static class HFileFilter implements PathFilter {
@@ -1514,7 +1332,7 @@ public abstract class FSUtils {
    * @throws IOException
    */
   public static FileSystem getCurrentFileSystem(Configuration conf)
-  throws IOException {
+      throws IOException {
     return getRootDir(conf).getFileSystem(conf);
   }
 
@@ -1557,59 +1375,36 @@ public abstract class FSUtils {
    * @throws IOException When scanning the directory fails.
    */
   public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
-  final FileSystem fs, final Path hbaseRootDir, TableName tableName, ErrorReporter errors)
-  throws IOException {
+      final FileSystem fs, final Path hbaseRootDir, final TableName tableName,
+      final ErrorReporter errors) throws IOException {
+    HMasterFileSystem mfs = HMasterFileSystem.open(fs, hbaseRootDir);
+    return getTableStoreFilePathMap(map, mfs, tableName, errors);
+  }
+
+  private static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
+      final HMasterFileSystem mfs, final TableName tableName, final ErrorReporter errors)
+      throws IOException {
     if (map == null) {
       map = new HashMap<String, Path>();
     }
+    for (HRegionInfo hri: mfs.getRegions(tableName)) {
+      if (errors != null) errors.progress();
 
-    // only include the directory paths to tables
-    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
-    // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
-    // should be regions.
-    PathFilter familyFilter = new FamilyDirFilter(fs);
-    List<Path> regionDirs = FsLayout.getRegionDirPaths(fs, tableDir);
-    for (Path dd : regionDirs) {
-      if (null != errors) {
-        errors.progress();
-      }
-      // else its a region name, now look in region for families
-      FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
-      for (FileStatus familyDir : familyDirs) {
-        if (null != errors) {
-          errors.progress();
-        }
-        Path family = familyDir.getPath();
-        if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
-          continue;
-        }
-        // now in family, iterate over the StoreFiles and
-        // put in map
-        FileStatus[] familyStatus = fs.listStatus(family);
-        for (FileStatus sfStatus : familyStatus) {
-          if (null != errors) {
-            errors.progress();
-          }
-          Path sf = sfStatus.getPath();
-          map.put( sf.getName(), sf);
+      HRegionFileSystem rfs = HRegionFileSystem.open(conf, hri);
+      for (String family: rfs.getFamilies()) {
+        if (errors != null) errors.progress();
+
+        for (StoreFileInfo storeFile: rfs.getStoreFiles(family)) {
+          if (errors != null) errors.progress();
+
+          Path sf = storeFile.getPath();
+          map.put(sf.getName(), sf);
         }
       }
     }
     return map;
   }
 
-  public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
-    int result = 0;
-    try {
-      for (Path familyDir:getFamilyDirs(fs, p)){
-        result += getReferenceFilePaths(fs, familyDir).size();
-      }
-    } catch (IOException e) {
-      LOG.warn("Error Counting reference files.", e);
-    }
-    return result;
-  }
-
   /**
    * Runs through the HBase rootdir and creates a reverse lookup map for
    * table StoreFile names to the full Path.
@@ -1643,18 +1438,13 @@ public abstract class FSUtils {
    * @return Map keyed by StoreFile name with a value of the full Path.
    * @throws IOException When scanning the directory fails.
    */
-  public static Map<String, Path> getTableStoreFilePathMap(
-    final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
-  throws IOException {
-    Map<String, Path> map = new HashMap<String, Path>();
-
-    // if this method looks similar to 'getTableFragmentation' that is because
-    // it was borrowed from it.
-
-    // only include the directory paths to tables
-    for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
-      getTableStoreFilePathMap(map, fs, hbaseRootDir,
-          FSUtils.getTableName(tableDir), errors);
+  public static Map<String, Path> getTableStoreFilePathMap(final FileSystem fs,
+      final Path hbaseRootDir, ErrorReporter errors) throws IOException {
+    final Map<String, Path> map = new HashMap<String, Path>();
+    HMasterFileSystem mfs = HMasterFileSystem.open(fs, hbaseRootDir);
+    for (TableName tableName: mfs.getTables()) {
+      if (errors != null) errors.progress();
+      getTableStoreFilePathMap(map, mfs, tableName, errors);
     }
     return map;
   }
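
The private getTableStoreFilePathMap overload just above has the same unresolved conf when it calls HRegionFileSystem.open(conf, hri). A minimal sketch of a fix, again just threading the Configuration through (an assumption, not something this patch settles):

    private static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
        final Configuration conf, final HMasterFileSystem mfs,
        final TableName tableName, final ErrorReporter errors) throws IOException {
      // ... body unchanged, except HRegionFileSystem.open(conf, hri) now resolves ...
    }
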
@@ -1875,74 +1665,34 @@ public abstract class FSUtils {
     FileSystem fs =  FileSystem.get(conf);
     Path rootPath = FSUtils.getRootDir(conf);
     long startTime = EnvironmentEdgeManager.currentTime();
-    Path queryPath;
-    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
-    if (null == desiredTable) {
-      queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
-    } else {
-      queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
-    }
 
-    // reject all paths that are not appropriate
-    PathFilter pathFilter = new PathFilter() {
-      @Override
-      public boolean accept(Path path) {
-        // this is the region name; it may get some noise data
-        if (null == path) {
-          return false;
-        }
 
-        // no parent?
-        Path parent = path.getParent();
-        if (null == parent) {
-          return false;
-        }
-
-        String regionName = path.getName();
-        if (null == regionName) {
-          return false;
-        }
-
-        if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
-          return false;
-        }
-        return true;
+    HMasterFileSystem mfs = HMasterFileSystem.open(conf);
+    List<HRegionInfo> hris;
+    if (desiredTable != null) {
+      hris = mfs.getRegions(TableName.valueOf(desiredTable));
+    } else {
+      hris = new ArrayList<HRegionInfo>();
+      for (TableName tableName: mfs.getTables()) {
+        hris.addAll(mfs.getRegions(tableName));
       }
-    };
-
-    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);
+    }
 
-    if (null == statusList) {
+    if (hris.isEmpty()) {
       return;
-    } else {
-      LOG.debug("Query Path: " + queryPath + " ; # list of files: " +
-          statusList.length);
     }
 
     // lower the number of threads in case we have very few expected regions
-    threadPoolSize = Math.min(threadPoolSize, statusList.length);
+    threadPoolSize = Math.min(threadPoolSize, hris.size());
 
     // run in multiple threads
     ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize,
         threadPoolSize, 60, TimeUnit.SECONDS,
-        new ArrayBlockingQueue<Runnable>(statusList.length));
+        new ArrayBlockingQueue<Runnable>(hris.size()));
     try {
       // ignore all file status items that are not of interest
-      for (FileStatus regionStatus : statusList) {
-        if (null == regionStatus) {
-          continue;
-        }
-
-        if (!regionStatus.isDirectory()) {
-          continue;
-        }
-
-        Path regionPath = regionStatus.getPath();
-        if (null == regionPath) {
-          continue;
-        }
-
-        tpe.execute(new FSRegionScanner(fs, regionPath,
+      for (HRegionInfo hri: hris) {
+        tpe.execute(new FSRegionScanner(conf, hri,
             regionToBestLocalityRSMapping, regionDegreeLocalityMapping));
       }
     } finally {
@@ -2010,7 +1760,7 @@ public abstract class FSUtils {
   /**
    * @param c
    * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on hdfs.
-   * @throws IOException 
+   * @throws IOException
    */
   public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
deleted file mode 100644
index efd8124..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.NavigableSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.wal.WALSplitter;
-
-/**
- * Utility methods for interacting with the hbase.root file system.
- */
-@InterfaceAudience.Private
-public final class FSVisitor {
-  private static final Log LOG = LogFactory.getLog(FSVisitor.class);
-
-  public interface RegionVisitor {
-    void region(final String region) throws IOException;
-  }
-
-  public interface StoreFileVisitor {
-    void storeFile(final String region, final String family, final String hfileName)
-       throws IOException;
-  }
-
-  public interface RecoveredEditsVisitor {
-    void recoveredEdits (final String region, final String logfile)
-      throws IOException;
-  }
-
-  public interface LogFileVisitor {
-    void logFile (final String server, final String logfile)
-      throws IOException;
-  }
-
-  private FSVisitor() {
-    // private constructor for utility class
-  }
-
-  /**
-   * Iterate over the table store files
-   *
-   * @param fs {@link FileSystem}
-   * @param tableDir {@link Path} to the table directory
-   * @param visitor callback object to get the store files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitRegions(final FileSystem fs, final Path tableDir,
-      final RegionVisitor visitor) throws IOException {
-    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
-    if (regions == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No regions under directory:" + tableDir);
-      }
-      return;
-    }
-
-    for (FileStatus region: regions) {
-      visitor.region(region.getPath().getName());
-    }
-  }
-
-  /**
-   * Iterate over the table store files
-   *
-   * @param fs {@link FileSystem}
-   * @param tableDir {@link Path} to the table directory
-   * @param visitor callback object to get the store files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir,
-      final StoreFileVisitor visitor) throws IOException {
-    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
-    if (regions == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No regions under directory:" + tableDir);
-      }
-      return;
-    }
-
-    for (FileStatus region: regions) {
-      visitRegionStoreFiles(fs, region.getPath(), visitor);
-    }
-  }
-
-  /**
-   * Iterate over the region store files
-   *
-   * @param fs {@link FileSystem}
-   * @param regionDir {@link Path} to the region directory
-   * @param visitor callback object to get the store files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
-      final StoreFileVisitor visitor) throws IOException {
-    FileStatus[] families = FSUtils.listStatus(fs, regionDir, new FSUtils.FamilyDirFilter(fs));
-    if (families == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No families under region directory:" + regionDir);
-      }
-      return;
-    }
-
-    PathFilter fileFilter = new FSUtils.FileFilter(fs);
-    for (FileStatus family: families) {
-      Path familyDir = family.getPath();
-      String familyName = familyDir.getName();
-
-      // get all the storeFiles in the family
-      FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, fileFilter);
-      if (storeFiles == null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("No hfiles found for family: " + familyDir + ", skipping.");
-        }
-        continue;
-      }
-
-      for (FileStatus hfile: storeFiles) {
-        Path hfilePath = hfile.getPath();
-        visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName());
-      }
-    }
-  }
-
-  /**
-   * Iterate over each region in the table and inform about recovered.edits
-   *
-   * @param fs {@link FileSystem}
-   * @param tableDir {@link Path} to the table directory
-   * @param visitor callback object to get the recovered.edits files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir,
-      final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
-    List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs));
-    if (regions == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No recoveredEdits regions under directory:" + tableDir);
-      }
-      return;
-    }
-
-    for (FileStatus region: regions) {
-      visitRegionRecoveredEdits(fs, region.getPath(), visitor);
-    }
-  }
-
-  /**
-   * Iterate over recovered.edits of the specified region
-   *
-   * @param fs {@link FileSystem}
-   * @param regionDir {@link Path} to the Region directory
-   * @param visitor callback object to get the recovered.edits files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir,
-      final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
-    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regionDir);
-    if (files == null || files.size() == 0) return;
-
-    for (Path source: files) {
-      // check to see if the file is zero length, in which case we can skip it
-      FileStatus stat = fs.getFileStatus(source);
-      if (stat.getLen() <= 0) continue;
-
-      visitor.recoveredEdits(regionDir.getName(), source.getName());
-    }
-  }
-
-  /**
-   * Iterate over hbase log files
-   *
-   * @param fs {@link FileSystem}
-   * @param rootDir {@link Path} to the HBase root folder
-   * @param visitor callback object to get the log files
-   * @throws IOException if an error occurred while scanning the directory
-   */
-  public static void visitLogFiles(final FileSystem fs, final Path rootDir,
-      final LogFileVisitor visitor) throws IOException {
-    Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-    FileStatus[] logServerDirs = FSUtils.listStatus(fs, logsDir);
-    if (logServerDirs == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No logs under directory:" + logsDir);
-      }
-      return;
-    }
-
-    for (FileStatus serverLogs: logServerDirs) {
-      String serverName = serverLogs.getPath().getName();
-
-      FileStatus[] wals = FSUtils.listStatus(fs, serverLogs.getPath());
-      if (wals == null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("No wals found for server: " + serverName + ", skipping.");
-        }
-        continue;
-      }
-
-      for (FileStatus walRef: wals) {
-        visitor.logFile(serverName, walRef.getPath().getName());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 98ad22b..8b2796e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -118,7 +119,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
@@ -395,7 +395,7 @@ public class HBaseFsck extends Configured implements Closeable {
           LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
               + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
               + retryCounter.getMaxAttempts());
-          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), 
+          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),
               ioe);
           try {
             exception = ioe;
@@ -762,7 +762,7 @@ public class HBaseFsck extends Configured implements Closeable {
         currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
         // For each region, get the start and stop key from the META and compare them to the
         // same information from the Stores.
-        HRegionFileSystem hrfs = HRegionFileSystem.create(getConf(), fs, tableDir, regionInfo);
+        HRegionFileSystem hrfs = HRegionFileSystem.open(getConf(), regionInfo, true);
         Path path = hrfs.getRegionDir();
         FileStatus[] files = fs.listStatus(path);
         // For all the column families in this region...
@@ -2279,8 +2279,7 @@ public class HBaseFsck extends Configured implements Closeable {
                   LOG.warn(hri + " start and stop keys are in the range of " + region
                       + ". The region might not be cleaned up from hdfs when region " + region
                       + " split failed. Hence deleting from hdfs.");
-                  HRegionFileSystem.deleteAndArchiveRegionFromFileSystem(getConf(), fs,
-                    FsLayout.getTableDirFromRegionDir(regionDir), hri);
+                  HRegionFileSystem.destroy(getConf(), hri);
                   return;
                 }
               }
@@ -2688,10 +2687,10 @@ public class HBaseFsck extends Configured implements Closeable {
         }
         regionsFromMeta = Ordering.natural().immutableSortedCopy(regions);
       }
-      
+
       return regionsFromMeta;
     }
-    
+
     private class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl {
       ErrorReporter errors;
 
@@ -4041,7 +4040,7 @@ public class HBaseFsck extends Configured implements Closeable {
     public synchronized Void call() throws IOException {
       try {
         // level 2: <HBASE_DIR>/<table>/*
-        List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir.getPath(), 
+        List<FileStatus> regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir.getPath(),
           new FSUtils.RegionDirFilter(fs));
         if (regionDirs == null) {
           return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
index 7f74d55..f2dbf52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index ea704f8..1b173bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -412,7 +412,7 @@ public class RegionSplitter {
    * Alternative getCurrentNrHRS which is no longer available.
    * @param connection
    * @return Rough count of regionservers out on cluster.
-   * @throws IOException 
+   * @throws IOException
    */
   private static int getRegionServerCount(final Connection connection) throws IOException {
     try (Admin admin = connection.getAdmin()) {
@@ -767,7 +767,7 @@ public class RegionSplitter {
    * @param conf
    * @param tableName
    * @return A Pair where first item is table dir and second is the split file.
-   * @throws IOException 
+   * @throws IOException
    */
   private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
       final TableName tableName)
@@ -785,7 +785,7 @@ public class RegionSplitter {
       getTableDirAndSplitFile(connection.getConfiguration(), tableName);
     Path tableDir = tableDirAndSplitFile.getFirst();
     Path splitFile = tableDirAndSplitFile.getSecond();
- 
+
     FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
 
     // Using strings because (new byte[]{0}).equals(new byte[]{0}) == false

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 303ed60..9cbb135 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -34,7 +34,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -43,10 +42,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
-import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
-import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java
deleted file mode 100644
index 7aa9761..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.Set;
-import java.util.HashSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.junit.*;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test {@link FSUtils}.
- */
-@Category({MiscTests.class, MediumTests.class})
-public class TestFSVisitor {
-  private static final Log LOG = LogFactory.getLog(TestFSVisitor.class);
-
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  private final String TABLE_NAME = "testtb";
-
-  private Set<String> tableFamilies;
-  private Set<String> tableRegions;
-  private Set<String> recoveredEdits;
-  private Set<String> tableHFiles;
-  private Set<String> regionServers;
-  private Set<String> serverLogs;
-
-  private FileSystem fs;
-  private Path tableDir;
-  private Path logsDir;
-  private Path rootDir;
-
-  @Before
-  public void setUp() throws Exception {
-    fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    rootDir = TEST_UTIL.getDataTestDir("hbase");
-    logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-
-    tableFamilies = new HashSet<String>();
-    tableRegions = new HashSet<String>();
-    recoveredEdits = new HashSet<String>();
-    tableHFiles = new HashSet<String>();
-    regionServers = new HashSet<String>();
-    serverLogs = new HashSet<String>();
-    tableDir = createTableFiles(rootDir, TABLE_NAME, tableRegions, tableFamilies, tableHFiles);
-    createRecoverEdits(tableDir, tableRegions, recoveredEdits);
-    createLogs(logsDir, regionServers, serverLogs);
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    fs.delete(rootDir, true);
-  }
-
-  @Test
-  public void testVisitStoreFiles() throws IOException {
-    final Set<String> regions = new HashSet<String>();
-    final Set<String> families = new HashSet<String>();
-    final Set<String> hfiles = new HashSet<String>();
-    FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
-      public void storeFile(final String region, final String family, final String hfileName)
-          throws IOException {
-        regions.add(region);
-        families.add(family);
-        hfiles.add(hfileName);
-      }
-    });
-    assertEquals(tableRegions, regions);
-    assertEquals(tableFamilies, families);
-    assertEquals(tableHFiles, hfiles);
-  }
-
-  @Test
-  public void testVisitRecoveredEdits() throws IOException {
-    final Set<String> regions = new HashSet<String>();
-    final Set<String> edits = new HashSet<String>();
-    FSVisitor.visitTableRecoveredEdits(fs, tableDir, new FSVisitor.RecoveredEditsVisitor() {
-      public void recoveredEdits (final String region, final String logfile)
-          throws IOException {
-        regions.add(region);
-        edits.add(logfile);
-      }
-    });
-    assertEquals(tableRegions, regions);
-    assertEquals(recoveredEdits, edits);
-  }
-
-  @Test
-  public void testVisitLogFiles() throws IOException {
-    final Set<String> servers = new HashSet<String>();
-    final Set<String> logs = new HashSet<String>();
-    FSVisitor.visitLogFiles(fs, rootDir, new FSVisitor.LogFileVisitor() {
-      public void logFile (final String server, final String logfile) throws IOException {
-        servers.add(server);
-        logs.add(logfile);
-      }
-    });
-    assertEquals(regionServers, servers);
-    assertEquals(serverLogs, logs);
-  }
-
-
-  /*
-   * |-testtb/
-   * |----f1d3ff8443297732862df21dc4e57262/
-   * |-------f1/
-   * |----------d0be84935ba84b66b1e866752ec5d663
-   * |----------9fc9d481718f4878b29aad0a597ecb94
-   * |-------f2/
-   * |----------4b0fe6068c564737946bcf4fd4ab8ae1
-   */
-  private Path createTableFiles(final Path rootDir, final String tableName,
-      final Set<String> tableRegions, final Set<String> tableFamilies,
-      final Set<String> tableHFiles) throws IOException {
-    Path tableDir = new Path(rootDir, tableName);
-    for (int r = 0; r < 10; ++r) {
-      String regionName = MD5Hash.getMD5AsHex(Bytes.toBytes(r));
-      tableRegions.add(regionName);
-      Path regionDir = FsLayout.getRegionDir(tableDir, regionName);
-      for (int f = 0; f < 3; ++f) {
-        String familyName = "f" + f;
-        tableFamilies.add(familyName);
-        Path familyDir = new Path(regionDir, familyName);
-        fs.mkdirs(familyDir);
-        for (int h = 0; h < 5; ++h) {
-         String hfileName = UUID.randomUUID().toString().replaceAll("-", "");
-         tableHFiles.add(hfileName);
-         fs.createNewFile(new Path(familyDir, hfileName));
-        }
-      }
-    }
-    return tableDir;
-  }
-
-  /*
-   * |-testtb/
-   * |----f1d3ff8443297732862df21dc4e57262/
-   * |-------recovered.edits/
-   * |----------0000001351969633479
-   * |----------0000001351969633481
-   */
-  private void createRecoverEdits(final Path tableDir, final Set<String> tableRegions,
-      final Set<String> recoverEdits) throws IOException {
-    for (String region: tableRegions) {
-      Path regionEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(FsLayout.getRegionDir(tableDir, region));
-      long seqId = System.currentTimeMillis();
-      for (int i = 0; i < 3; ++i) {
-        String editName = String.format("%019d", seqId + i);
-        recoverEdits.add(editName);
-        FSDataOutputStream stream = fs.create(new Path(regionEditsDir, editName));
-        stream.write(Bytes.toBytes("test"));
-        stream.close();
-      }
-    }
-  }
-
-  /*
-   * Old style
-   * |-.logs/
-   * |----server5,5,1351969633508/
-   * |-------server5,5,1351969633508.0
-   * |----server6,6,1351969633512/
-   * |-------server6,6,1351969633512.0
-   * |-------server6,6,1351969633512.3
-   * New style
-   * |-.logs/
-   * |----server3,5,1351969633508/
-   * |-------server3,5,1351969633508.default.0
-   * |----server4,6,1351969633512/
-   * |-------server4,6,1351969633512.default.0
-   * |-------server4,6,1351969633512.some_provider.3
-   */
-  private void createLogs(final Path logDir, final Set<String> servers,
-      final Set<String> logs) throws IOException {
-    for (int s = 0; s < 7; ++s) {
-      String server = String.format("server%d,%d,%d", s, s, System.currentTimeMillis());
-      servers.add(server);
-      Path serverLogDir = new Path(logDir, server);
-      if (s % 2 == 0) {
-        if (s % 3 == 0) {
-          server += ".default";
-        } else {
-          server += "." + s;
-        }
-      }
-      fs.mkdirs(serverLogDir);
-      for (int i = 0; i < 5; ++i) {
-        String logfile = server + '.' + i;
-        logs.add(logfile);
-        FSDataOutputStream stream = fs.create(new Path(serverLogDir, logfile));
-        stream.write(Bytes.toBytes("test"));
-        stream.close();
-      }
-    }
-  }
-}


[3/3] hbase git commit: Incomplete experiment (it does not compile and has missing code)

Posted by mb...@apache.org.
Incomplete experiment (it does not compile and has missing code)

Trying to move everything fs-related to fs/
HMasterFileSystem has a LegacyMasterFs and a HierarchicalMasterFs impl
HRegionFileSystem has a LegacyRegionFs and a HierarchicalRegionFs impl

I still have the old MasterFileSystem around
because I didn't have time to move it,
but the idea is to move everything that loops over the dirs
or knows about the layout into the MasterFs and RegionFs classes.

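Concretely, the layering these diffs code against is: the master-side class enumerates namespaces, tables and regions, and the region-side class enumerates families and store files, so callers never touch the directory layout. A usage sketch assembled only from the calls that appear in this series:

    Configuration conf = HBaseConfiguration.create();
    HMasterFileSystem mfs = HMasterFileSystem.open(conf);
    for (TableName table : mfs.getTables()) {
      for (HRegionInfo hri : mfs.getRegions(table)) {
        HRegionFileSystem rfs = HRegionFileSystem.open(conf, hri);
        for (String family : rfs.getFamilies()) {
          for (StoreFileInfo storeFile : rfs.getStoreFiles(family)) {
            // layout-agnostic: the same loop works for the legacy and
            // the hierarchical on-disk layouts
          }
        }
      }
    }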

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e7743d77
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e7743d77
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e7743d77

Branch: refs/heads/hbase-14439
Commit: e7743d779a8bfcd65d874afdb88d273ba9e5c4f2
Parents: f2856e2
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Sep 16 14:12:39 2015 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Sep 16 16:49:25 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HTableDescriptor.java   |   19 -
 .../hadoop/hbase/fs/HMasterFileSystem.java      |   61 +
 .../hadoop/hbase/fs/HRegionFileSystem.java      |  275 ++++
 .../hbase/fs/HierarchicalMasterFileSystem.java  |   49 +
 .../hbase/fs/HierarchicalRegionFileSystem.java  |   88 ++
 .../hadoop/hbase/fs/LegacyMasterFileSystem.java |  152 +++
 .../hadoop/hbase/fs/LegacyRegionFileSystem.java |  293 +++++
 .../hadoop/hbase/fs/layout/AFsLayout.java       |   55 -
 .../hbase/fs/layout/AHFileLinkManager.java      |   73 --
 .../apache/hadoop/hbase/fs/layout/FsLayout.java |  220 ----
 .../hbase/fs/layout/HierarchicalFsLayout.java   |  171 ---
 .../hbase/fs/layout/StandardHBaseFsLayout.java  |  115 --
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |   29 +-
 .../hbase/io/hfile/HFilePrettyPrinter.java      |   38 +-
 .../hadoop/hbase/master/CatalogJanitor.java     |   27 +-
 .../hadoop/hbase/master/MasterFileSystem.java   |   40 +-
 .../hadoop/hbase/master/RegionStates.java       |    4 +-
 .../hbase/regionserver/CompactionTool.java      |    2 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   98 +-
 .../hbase/regionserver/HRegionFileSystem.java   | 1216 ------------------
 .../regionserver/HRegionFileSystemFactory.java  |   30 -
 .../hadoop/hbase/regionserver/HStore.java       |    6 +-
 .../HierarchicalHRegionFileSystem.java          |   48 -
 .../HierarchicalHRegionFileSystemFactory.java   |   31 -
 .../RegionDoesNotExistException.java            |   42 +
 .../RegionMergeTransactionImpl.java             |    3 +-
 .../regionserver/SplitTransactionImpl.java      |    5 +-
 .../hbase/regionserver/StoreFileInfo.java       |  111 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |    4 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |    5 +-
 .../hbase/snapshot/SnapshotManifestV1.java      |    8 +-
 .../hadoop/hbase/util/FSRegionScanner.java      |   52 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |    1 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  368 +-----
 .../org/apache/hadoop/hbase/util/FSVisitor.java |  231 ----
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   15 +-
 .../hadoop/hbase/util/HFileV1Detector.java      |    2 +-
 .../hadoop/hbase/util/RegionSplitter.java       |    8 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |    5 -
 .../apache/hadoop/hbase/util/TestFSVisitor.java |  229 ----
 40 files changed, 1187 insertions(+), 3042 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 9145cdc..1722268 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1368,25 +1368,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
       remove(match);
   }
 
-  /**
-   * Returns the {@link Path} object representing the table directory under
-   * path rootdir
-   *
-   * Deprecated use FSUtils.getTableDir() instead.
-   *
-   * @param rootdir qualified path of HBase root directory
-   * @param tableName name of table
-   * @return {@link Path} for table
-   */
-  @Deprecated
-  public static Path getTableDir(Path rootdir, final byte [] tableName) {
-    //This is bad I had to mirror code from FSUTils.getTableDir since
-    //there is no module dependency between hbase-client and hbase-server
-    TableName name = TableName.valueOf(tableName);
-    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
-              new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
-  }
-
   /** Table descriptor for <code>hbase:meta</code> catalog table
    * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
    * Admin#getTableDescriptor(TableName.META_TABLE) instead.

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HMasterFileSystem.java
new file mode 100644
index 0000000..8f92cf0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HMasterFileSystem.java
@@ -0,0 +1,61 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+public abstract class HMasterFileSystem {
+  public abstract Collection<String> getNamespaces() throws IOException;
+  public abstract Collection<TableName> getTables(String namespace) throws IOException;
+  public abstract Collection<HRegionInfo> getRegions(TableName tableName) throws IOException;
+
+  public Collection<TableName> getTables() throws IOException {
+    ArrayList<TableName> tables = new ArrayList<TableName>();
+    for (String ns: getNamespaces()) {
+      tables.addAll(getTables(ns));
+    }
+    return tables;
+  }
+
+  public static HMasterFileSystem open(final Configuration conf) throws IOException {
+    return open(conf, FSUtils.getRootDir(conf));
+  }
+
+  public static HMasterFileSystem open(Configuration conf, Path rootDir) throws IOException {
+    // Default to "legacy" when the property is unset (assumption; avoids an NPE on get()).
+    String fsType = conf.get("hbase.fs.layout.type", "legacy").toLowerCase();
+    switch (fsType) {
+      case "legacy":
+        return new LegacyMasterFileSystem(conf, rootDir);
+      case "hierarchical":
+        return new HierarchicalMasterFileSystem(conf, rootDir);
+      default:
+        throw new IOException("Invalid filesystem type " + fsType);
+    }
+  }
+}
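
A minimal sketch of how a caller might exercise this factory, assuming an ordinary
HBaseConfiguration and the "hbase.fs.layout.type" property introduced above (nothing
below is part of the patch itself):

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.fs.layout.type", "legacy");       // or "hierarchical"
    HMasterFileSystem mfs = HMasterFileSystem.open(conf);
    for (String ns : mfs.getNamespaces()) {
      for (TableName table : mfs.getTables(ns)) {
        System.out.println(ns + ":" + table.getQualifierAsString());
      }
    }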

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HRegionFileSystem.java
new file mode 100644
index 0000000..966ac06
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HRegionFileSystem.java
@@ -0,0 +1,275 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public abstract class HRegionFileSystem {
+  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);
+
+  // Kept for compatibility with existing callers.
+  public enum Type { TABLE, SNAPSHOT, ARCHIVE }
+
+  /** Name of the region info file that resides just under the region directory. */
+  public final static String REGION_INFO_FILE = ".regioninfo";
+
+  // ==========================================================================
+  //  PUBLIC methods - add/remove/list store files
+  // ==========================================================================
+  /**
+   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
+   * for safer file creation.
+   * <code>
+   * Path file = fs.createTempName();
+   * ...StoreFile.Writer(file)...
+   * fs.commitStoreFile("family", file);
+   * </code>
+   *
+   * @return Unique {@link Path} of the temporary file
+   */
+  public Path createTempName() {
+    return createTempName(null);
+  }
+
+  /**
+   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
+   * for safer file creation.
+   * <code>
+   * Path file = fs.createTempName();
+   * ...StoreFile.Writer(file)...
+   * fs.commitStoreFile("family", file);
+   * </code>
+   *
+   * @param suffix extra information to append to the generated name
+   * @return Unique {@link Path} of the temporary file
+   */
+  public abstract Path createTempName(final String suffix); // TODO REMOVE THIS
+
+
+  /**
+   * Move the file from a build/temp location to the main family store directory.
+   * @param familyName Family that will gain the file
+   * @param buildPath {@link Path} to the file to commit.
+   * @return The new {@link Path} of the committed file
+   * @throws IOException
+   */
+  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
+    return commitStoreFile(familyName, buildPath, -1, false);
+  }
+
+  public abstract Path commitStoreFile(final String familyName, final Path buildPath,
+      final long seqNum, final boolean generateNewName) throws IOException;
+
+  /**
+   * Moves multiple store files to the relative region's family store directory.
+   * @param storeFiles list of store files divided by family
+   * @throws IOException
+   */
+  public abstract void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles)
+      throws IOException;
+
+  /**
+   * Bulk load: Add a specified store file to the specified family.
+   * If the source file is on the same file-system it is moved from the
+   * source location to the destination location; otherwise it is copied over.
+   *
+   * @param familyName Family that will gain the file
+   * @param srcPath {@link Path} to the file to import
+   * @param seqNum Bulk Load sequence number
+   * @return The destination {@link Path} of the bulk loaded file
+   * @throws IOException
+   */
+  public abstract Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
+      throws IOException;
+
+  /**
+   * Archives the specified store file from the specified family.
+   * @param familyName Family that contains the store files
+   * @param filePath {@link Path} to the store file to remove
+   * @throws IOException if the archiving fails
+   */
+  public abstract void removeStoreFile(String familyName, Path filePath)
+      throws IOException;
+
+  /**
+   * Closes and archives the specified store files from the specified family.
+   * @param familyName Family that contains the store files
+   * @param storeFiles set of store files to remove
+   * @throws IOException if the archiving fails
+   */
+  public abstract void removeStoreFiles(String familyName, Collection<StoreFile> storeFiles)
+      throws IOException;
+
+  public abstract Path getStoreFilePath(final String familyName, final String fileName);
+
+  /**
+   * Returns the store files available for the family.
+   * This method performs the filtering based on the valid store files.
+   * @param familyName Column Family Name
+   * @return a set of {@link StoreFileInfo} for the specified family.
+   */
+  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
+    return getStoreFiles(Bytes.toString(familyName));
+  }
+
+  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+    return getStoreFiles(familyName, true);
+  }
+
+  /**
+   * Returns the store files available for the family.
+   * This method performs the filtering based on the valid store files.
+   * @param familyName Column Family Name
+   * @return a set of {@link StoreFileInfo} for the specified family.
+   */
+  public abstract Collection<StoreFileInfo> getStoreFiles(String familyName, boolean validate)
+      throws IOException;
+
+  /**
+   * Return the store file information of the specified family/file.
+   *
+   * @param familyName Column Family Name
+   * @param fileName File Name
+   * @return The {@link StoreFileInfo} for the specified family/file
+   */
+  public abstract StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
+      throws IOException;
+
+  /**
+   * @return the set of families present on disk
+   * @throws IOException
+   */
+  public abstract Collection<String> getFamilies() throws IOException;
+
+  // ==========================================================================
+  //  ??? methods - do we still need this stuff in 2.0?
+  // ==========================================================================
+  public abstract void writeRecoveryCheckPoint() throws IOException;
+  public abstract void cleanup() throws IOException;
+
+  // ???
+  public void logFileSystemState(Log log) {}
+  public HRegionInfo getRegionInfoForFS() { return null; }
+  public FileSystem getFileSystem() { return null; }
+  public Path getRegionDir() { return null; }
+  public Path getStoreDir(String family) { return null; }
+
+  // TODO: remove on the proc-v2 rewrite; move into a Merge/Split FS interface?
+  public void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {}
+  public void commitDaughterRegion(HRegionInfo hri) {}
+  public void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {}
+  public void commitMergedRegion(HRegionInfo hri) {}
+  public void createSplitsDir() throws IOException {}
+  public Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
+      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy)
+          throws IOException { return null; }
+  public Path getMergesDir() { return null; }
+  public Path getMergesDir(final HRegionInfo hri) { return null; }
+  public void createMergesDir() throws IOException {}
+  public void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter) {}
+  public void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, HRegionInfo daughter) {}
+  public void cleanupSplitsDir() throws IOException {}
+  public void cleanupMergesDir() throws IOException {}
+  public Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
+      final StoreFile f, final Path mergedDir)
+      throws IOException { return null; }
+
+  public boolean hasReferences(final String familyName) { return false; }
+  public boolean hasReferences(final HTableDescriptor htd) { return false; }
+
+  public void openFamily(final String family) throws IOException {}
+
+  // ==========================================================================
+  //  MAYBE methods
+  // ==========================================================================
+  public abstract HRegionInfo getRegionInfo();
+
+  public TableName getTable() {
+    return getRegionInfo().getTable();
+  }
+
+  // ==========================================================================
+  //  PUBLIC methods - create/open/destroy
+  // ==========================================================================
+  public abstract void open(boolean rdonly) throws IOException;
+  public abstract void create() throws IOException;
+  public abstract void destroy() throws IOException;
+
+  // ==========================================================================
+  //  PUBLIC Static/Factory methods - create/open/destroy
+  // ==========================================================================
+  public static HRegionFileSystem createSnapshot(final Configuration conf,
+      final HRegionInfo regionInfo) throws IOException {
+    // we need a dir on open, create if we want to reuse...
+    return null;
+  }
+
+  public static HRegionFileSystem create(final Configuration conf,
+      final HRegionInfo regionInfo) throws IOException {
+    HRegionFileSystem rfs = getInstance(conf, regionInfo);
+    rfs.create();
+    return rfs;
+  }
+
+  public static HRegionFileSystem destroy(final Configuration conf,
+      final HRegionInfo regionInfo) throws IOException {
+    HRegionFileSystem rfs = getInstance(conf, regionInfo);
+    rfs.destroy();
+    return rfs;
+  }
+
+  public static HRegionFileSystem open(final Configuration conf,
+      final HRegionInfo regionInfo, boolean readOnly) throws IOException {
+    HRegionFileSystem rfs = getInstance(conf, regionInfo);
+    rfs.open(readOnly);
+    return rfs;
+  }
+
+  private static HRegionFileSystem getInstance(final Configuration conf,
+      final HRegionInfo regionInfo) throws IOException {
+    String fsType = conf.get("hbase.fs.layout.type").toLowerCase();
+    switch (fsType) {
+      case "legacy":
+        return new LegacyRegionFileSystem(conf, regionInfo);
+      case "hierarchical":
+        return new HierarchicalRegionFileSystem(conf, regionInfo);
+      default:
+        throw new IOException("Invalid filesystem type " + fsType);
+    }
+  }
+}
\ No newline at end of file
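
Spelling out the temp-then-commit protocol from the createTempName() javadoc, a
writer might do the following (the "f" family and the open() arguments are
illustrative only, not part of the patch):

    HRegionFileSystem rfs = HRegionFileSystem.open(conf, regionInfo, false);
    Path tmp = rfs.createTempName();             // unique name under the region's temp dir
    // ... write the new store file at 'tmp', e.g. with the StoreFile writer machinery ...
    Path dst = rfs.commitStoreFile("f", tmp);    // moved into the family store dir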

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalMasterFileSystem.java
new file mode 100644
index 0000000..fef4686
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalMasterFileSystem.java
@@ -0,0 +1,49 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+
+public class HierarchicalMasterFileSystem extends LegacyMasterFileSystem {
+  public HierarchicalMasterFileSystem(Configuration conf, Path rootDir) throws IOException {
+    super(conf, rootDir);
+  }
+
+  @Override
+  public Collection<HRegionInfo> getRegions(TableName tableName) throws IOException {
+    /*
+    for (...ns bucket..) {
+      for (...rs...) {
+        ...
+      }
+    }
+    */
+    return null;
+  }
+}
\ No newline at end of file
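
The commented-out skeleton matches what the deleted HierarchicalFsLayout.getRegionDirPaths()
did: one extra level of bucket directories between the table dir and the region dirs. A
sketch of the intended walk, assuming a FileSystem fs and a table dir Path (not part of
the patch):

    List<Path> regionDirs = new ArrayList<Path>();
    for (FileStatus bucket : fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs))) {
      for (FileStatus region : fs.listStatus(bucket.getPath(), new FSUtils.RegionDirFilter(fs))) {
        regionDirs.add(region.getPath());        // leaf dirs are encoded region names
      }
    }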

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalRegionFileSystem.java
new file mode 100644
index 0000000..d6a6a97
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HierarchicalRegionFileSystem.java
@@ -0,0 +1,88 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+
+@InterfaceAudience.Private
+public class HierarchicalRegionFileSystem extends LegacyRegionFileSystem {
+  private static final Log LOG = LogFactory.getLog(HierarchicalRegionFileSystem.class);
+
+  private static final String OLD_REGION_NAME_PADDING = "abcdef1234abcdef1234abcdef1234ab";
+
+  /** Number of characters for DIR name, 4 characters for 16^4 = 65536 buckets. */
+  public static final int HUMONGOUS_DIR_NAME_SIZE = 4;
+
+  public HierarchicalRegionFileSystem(Configuration conf, HRegionInfo hri) throws IOException {
+    super(conf, hri);
+  }
+
+  // ==========================================================================
+  //  PROTECTED FS-Layout Internals
+  // ==========================================================================
+  @Override
+  protected Path getRegionDir(Path baseDir) {
+    return getHumongousRegionDir(baseDir, getTable(), getRegionInfo().getEncodedName());
+  }
+
+  private Path getHumongousRegionDir(Path baseDir, TableName tableName, String name) {
+    if (name.length() != HRegionInfo.MD5_HEX_LENGTH) {
+      String table = tableName.getQualifierAsString();
+      String namespace = tableName.getNamespaceAsString();
+
+      // Meta and old root table use the old encoded name format still
+      if (!namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
+        throw new IllegalArgumentException("The region with encoded name " + name
+          + " is not a humongous region, cannot get humongous region dir from it.");
+      }
+      if (!table.equals(TableName.META_TABLE_NAME.getQualifierAsString()) &&
+          !table.equals(TableName.OLD_ROOT_TABLE_NAME.getQualifierAsString())) {
+        throw new IllegalArgumentException("The region with encoded name " + name
+          + " is not a humongous region, cannot get humongous region dir from it.");
+      }
+
+      // Add padding to guarantee we will have enough characters
+      return new Path(new Path(baseDir, makeBucketName(name, OLD_REGION_NAME_PADDING)), name);
+    }
+    return new Path(new Path(baseDir, makeBucketName(name, null)), name);
+  }
+
+  private String makeBucketName(String regionName, String padding) {
+    if (padding != null) {
+      regionName = regionName + padding;
+    }
+    return regionName.substring(HRegionInfo.MD5_HEX_LENGTH - HUMONGOUS_DIR_NAME_SIZE);
+  }
+}
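
Concretely, the bucket is just the last HUMONGOUS_DIR_NAME_SIZE (4) characters of the
32-character MD5 encoded name, e.g. with a made-up name:

    String encoded = "7e9d8a3f5b2c4d6e8f0a1b2c3d4e5f60";   // 32 hex chars
    String bucket = encoded.substring(32 - 4);             // "5f60"
    // region dir: <tableDir>/5f60/7e9d8a3f5b2c4d6e8f0a1b2c3d4e5f60

Shorter legacy names (meta and old root) are padded first so the substring always has
enough characters.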

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyMasterFileSystem.java
new file mode 100644
index 0000000..87ee61b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyMasterFileSystem.java
@@ -0,0 +1,152 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+public class LegacyMasterFileSystem extends HMasterFileSystem {
+  private static final Log LOG = LogFactory.getLog(LegacyMasterFileSystem.class);
+
+  private final Path activeNsDir;
+  private final Path rootDir;
+  private final FileSystem fs;
+
+  public LegacyMasterFileSystem(Configuration conf, Path rootDir) throws IOException {
+    this.fs = rootDir.getFileSystem(conf);
+    this.rootDir = rootDir;
+    this.activeNsDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
+  }
+
+  public Collection<String> getNamespaces() throws IOException {
+    return getNamespaces(activeNsDir);
+  }
+
+  protected Collection<String> getNamespaces(Path baseDir) throws IOException {
+    FileStatus[] activeNsDirs = fs.globStatus(new Path(baseDir, "*"));
+    if (activeNsDirs == null || activeNsDirs.length == 0) {
+      return Collections.emptyList();
+    }
+    ArrayList<String> namespaces = new ArrayList<String>(activeNsDirs.length);
+    for (int i = 0; i < activeNsDirs.length; ++i) {
+      namespaces.add(activeNsDirs[i].getPath().getName());
+    }
+    return namespaces;
+  }
+
+  public Collection<TableName> getTables(String namespace) throws IOException {
+    Path baseDir = new Path(activeNsDir, namespace);
+    FileStatus[] dirs = fs.listStatus(baseDir, new UserTableDirFilter(fs));
+    if (dirs == null || dirs.length == 0) {
+      return Collections.emptyList();
+    }
+
+    ArrayList<TableName> tables = new ArrayList<TableName>(dirs.length);
+    for (int i = 0; i < dirs.length; ++i) {
+      tables.add(TableName.valueOf(namespace, dirs[i].getPath().getName()));
+    }
+    return tables;
+  }
+
+  public Collection<HRegionInfo> getRegions(TableName tableName) throws IOException {
+    // TODO: list the region dirs under the table dir and load each HRegionInfo.
+    return null;
+  }
+
+  protected Collection<HRegionInfo> getRegions(Path baseDir) throws IOException {
+    FileStatus[] dirs = fs.listStatus(baseDir, new RegionDirFilter(fs));
+    if (dirs == null || dirs.length == 0) {
+      return Collections.emptyList();
+    }
+
+    ArrayList<HRegionInfo> hriList = new ArrayList<HRegionInfo>(dirs.length);
+    for (int i = 0; i < dirs.length; ++i) {
+      // TODO: Load HRI
+    }
+    return hriList;
+  }
+
+  /**
+   * A {@link PathFilter} that returns usertable directories. To get all directories use the
+   * {@link BlackListDirFilter} with a <tt>null</tt> blacklist
+   */
+  private static class UserTableDirFilter extends FSUtils.BlackListDirFilter {
+    public UserTableDirFilter(FileSystem fs) {
+      super(fs, HConstants.HBASE_NON_TABLE_DIRS);
+    }
+
+    @Override
+    protected boolean isValidName(final String name) {
+      if (!super.isValidName(name)) {
+        return false;
+      }
+      try {
+        TableName.isLegalTableQualifierName(Bytes.toBytes(name));
+      } catch (IllegalArgumentException e) {
+        LOG.info("Invalid table name: " + name);
+        return false;
+      }
+      return true;
+    }
+  }
+
+  /**
+   * Filter that accepts region directories (hex-encoded names, plus older numeric names).
+   */
+  private static class RegionDirFilter implements PathFilter {
+    // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
+    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
+    final FileSystem fs;
+
+    public RegionDirFilter(FileSystem fs) {
+      this.fs = fs;
+    }
+
+    @Override
+    public boolean accept(Path rd) {
+      if (!regionDirPattern.matcher(rd.getName()).matches()) {
+        return false;
+      }
+
+      try {
+        return fs.getFileStatus(rd).isDirectory();
+      } catch (IOException ioe) {
+        // Maybe the file was moved or the fs was disconnected.
+        LOG.warn("Skipping file " + rd +" due to IOException", ioe);
+        return false;
+      }
+    }
+  }
+}
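
For reference, the directory shape this class walks (HConstants.BASE_NAMESPACE_DIR is
"data"; paths illustrative):

    <rootdir>/data/<namespace>                      <- getNamespaces()
    <rootdir>/data/<namespace>/<table>              <- getTables(namespace)
    <rootdir>/data/<namespace>/<table>/<region>     <- getRegions(tableName), still a TODO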

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyRegionFileSystem.java
new file mode 100644
index 0000000..9f8cc70
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/LegacyRegionFileSystem.java
@@ -0,0 +1,293 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+public class LegacyRegionFileSystem extends HRegionFileSystem {
+  private static final Log LOG = LogFactory.getLog(LegacyRegionFileSystem.class);
+
+  /** Temporary subdirectory of the region directory used for compaction output. */
+  protected static final String REGION_TEMP_DIR = ".tmp";
+
+  private final Configuration conf;
+  private final HRegionInfo hri;
+  private final Path regionDir;
+  private final FileSystem fs;
+
+  public LegacyRegionFileSystem(Configuration conf, HRegionInfo hri) throws IOException {
+    this.fs = FSUtils.getCurrentFileSystem(conf);
+    this.hri = hri;
+    this.conf = conf;
+    this.regionDir = getRegionDir(getTableDir(getRootDir()));
+  }
+
+  // ==========================================================================
+  //  ??? methods - do we still need this stuff in 2.0?
+  // ==========================================================================
+  @Override
+  public void writeRecoveryCheckPoint() throws IOException {
+    //checkRegionInfoOnFilesystem
+  }
+
+  @Override
+  public void cleanup() throws IOException {
+    /*
+      // Remove temporary data left over from old regions
+      status.setStatus("Cleaning up temporary data from old regions");
+      fs.cleanupTempDir();
+
+      status.setStatus("Cleaning up detritus from prior splits");
+      // Get rid of any splits or merges that were lost in-progress.  Clean out
+      // these directories here on open.  We may be opening a region that was
+      // being split but we crashed in the middle of it all.
+      fs.cleanupAnySplitDetritus();
+      fs.cleanupMergesDir();
+    */
+  }
+
+  // ==========================================================================
+  //  PUBLIC methods - create/open/destroy
+  // ==========================================================================
+  @Override
+  public void open(boolean rdonly) throws IOException {
+  }
+
+  @Override
+  public void create() throws IOException {
+  }
+
+  @Override
+  public void destroy() throws IOException {
+  }
+
+  // ==========================================================================
+  //  PUBLIC methods - add/remove/list store files
+  // ==========================================================================
+  public Path createTempName(final String suffix) {
+    return new Path(getTempDir(), generateUniqueName(suffix));
+  }
+
+  public Path commitStoreFile(final String familyName, final Path buildPath,
+      final long seqNum, final boolean generateNewName) throws IOException {
+    return null;
+  }
+
+  public void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles)
+      throws IOException {
+  }
+
+  public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
+      throws IOException {
+    return null;
+  }
+
+  public void removeStoreFile(String familyName, Path filePath)
+      throws IOException {
+  }
+
+  public void removeStoreFiles(String familyName, Collection<StoreFile> storeFiles)
+      throws IOException {
+  }
+
+  public Path getStoreFilePath(final String familyName, final String fileName) {
+    return null;
+  }
+
+  /**
+   * Returns the store files available for the family.
+   * This method performs the filtering based on the valid store files.
+   * @param familyName Column Family Name
+   * @return a set of {@link StoreFileInfo} for the specified family.
+   */
+  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
+      throws IOException {
+    /*
+    Path familyDir = getStoreDir(this.regionDir, familyName);
+    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
+    if (files == null) {
+      LOG.debug("No StoreFiles for: " + familyDir);
+      return null;
+    }
+
+    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
+    for (FileStatus status: files) {
+      if (validate && !StoreFileInfo.isValid(status)) {
+        LOG.warn("Invalid StoreFile: " + status.getPath());
+        continue;
+      }
+      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
+        regionInfoForFs, familyName, status.getPath());
+      storeFiles.add(info);
+    }
+    return storeFiles;
+    */
+    return null;
+  }
+
+  public StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
+      throws IOException {
+    return null;
+  }
+
+  /**
+   * @return the set of families present on disk
+   * @throws IOException
+   */
+  public Collection<String> getFamilies() throws IOException {
+    FileStatus[] fds = FSUtils.listStatus(fs, this.regionDir, new FamilyDirFilter(fs));
+    if (fds == null) return null;
+
+    ArrayList<String> families = new ArrayList<String>(fds.length);
+    for (FileStatus status: fds) {
+      families.add(status.getPath().getName());
+    }
+
+    return families;
+  }
+
+  // ==========================================================================
+  //  PROTECTED Internals
+  // ==========================================================================
+
+  /**
+   * Generate a unique file name, used by createTempName() and commitStoreFile()
+   * @param suffix extra information to append to the generated name
+   * @return Unique file name
+   */
+  protected static String generateUniqueName(final String suffix) {
+    String name = UUID.randomUUID().toString().replaceAll("-", "");
+    if (suffix != null) name += suffix;
+    return name;
+  }
+
+  public HRegionInfo getRegionInfo() {
+    return hri;
+  }
+
+  // ==========================================================================
+  //  PROTECTED FS-Layout Internals
+  // ==========================================================================
+  /** @return {@link Path} to the region's temp directory, used for file creations */
+  Path getTempDir() {
+    return new Path(getRegionDir(), REGION_TEMP_DIR);
+  }
+
+  protected Path getRootDir() throws IOException {
+    // Assumption: regions hang off the configured HBase root dir, not "/".
+    return FSUtils.getRootDir(conf);
+  }
+
+  protected Path getTableDir(Path baseDir) {
+    TableName table = getTable();
+    return new Path(baseDir, new Path(table.getNamespaceAsString(), table.getQualifierAsString()));
+  }
+
+  protected Path getRegionDir(Path baseDir) {
+    return new Path(baseDir, getRegionInfo().getEncodedName());
+  }
+
+  protected Path getStoreDir(Path baseDir, String familyName) {
+    return new Path(baseDir, familyName);
+  }
+
+  protected Path getStoreFilePath(Path baseDir, String family, String name) {
+    return getStoreFilePath(getStoreDir(baseDir, family), name);
+  }
+
+  protected Path getStoreFilePath(Path baseDir, String name) {
+    return new Path(baseDir, name);
+  }
+
+  @Override
+  public String toString() {
+    return String.format("%s(%s)", getClass().getName(), regionDir);
+  }
+
+  /**
+   * Filter for all dirs that are legal column family names.  This is generally used for colfam
+   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
+   */
+  private static class FamilyDirFilter implements PathFilter {
+    final FileSystem fs;
+
+    public FamilyDirFilter(FileSystem fs) {
+      this.fs = fs;
+    }
+
+    @Override
+    public boolean accept(Path rd) {
+      try {
+        // throws IAE if invalid
+        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
+      } catch (IllegalArgumentException iae) {
+        // path name is an invalid family name and thus is excluded.
+        return false;
+      }
+
+      try {
+        return fs.getFileStatus(rd).isDirectory();
+      } catch (IOException ioe) {
+        // Maybe the file was moved or the fs was disconnected.
+        LOG.warn("Skipping file " + rd +" due to IOException", ioe);
+        return false;
+      }
+    }
+  }
+}
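
Putting the protected path helpers together, a store file for a hypothetical table
ns:tbl and family f resolves roughly as:

    getRootDir()                    -> <rootdir>            (from FSUtils.getRootDir)
    getTableDir(rootDir)            -> <rootdir>/ns/tbl
    getRegionDir(tableDir)          -> <rootdir>/ns/tbl/<encoded-region-name>
    getStoreDir(regionDir, "f")     -> .../<encoded-region-name>/f
    getTempDir()                    -> .../<encoded-region-name>/.tmp

Unlike the on-disk legacy layout, getTableDir() currently omits the base namespace
("data") directory, which looks like another piece of the missing code.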

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
deleted file mode 100644
index 8fd651f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.fs.layout;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-public abstract class AFsLayout {
-  protected final static Log LOG = LogFactory.getLog(AFsLayout.class);
-  
-  public abstract HRegionFileSystemFactory getHRegionFileSystemFactory();
-  
-  public abstract Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName);
-
-  public abstract Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir);
-
-  public abstract Path getTableDirFromRegionDir(Path regionDir);
-  
-  public abstract List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException;
-
-  public abstract List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, FSUtils.RegionDirFilter filter) throws IOException;
-
-  public abstract Path getRegionDir(Path tableDir, String name);
-
-  protected AFsLayout() {
-    super();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
deleted file mode 100644
index 0eb2489..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.fs.layout;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.util.Pair;
-
-public abstract class AHFileLinkManager {
-  public abstract HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) throws IOException;
-  
-  public abstract HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, final Path hFileLinkPattern);
-
-  public abstract Path createPath(final TableName table, final String region, final String family, final String hfile);
-
-  public abstract HFileLink build(final Configuration conf, final TableName table, final String region, final String family, final String hfile)
-      throws IOException;
-
-  public abstract boolean isHFileLink(final Path path);
-
-  public abstract boolean isHFileLink(String fileName);
-
-  public abstract String getReferencedHFileName(final String fileName);
-
-  public abstract Path getHFileLinkPatternRelativePath(Path path);
-
-  public abstract String getReferencedRegionName(final String fileName);
-  
-  public abstract TableName getReferencedTableName(final String fileName);
-  
-  public abstract String createHFileLinkName(final HRegionInfo hfileRegionInfo, final String hfileName);
-
-  public abstract String createHFileLinkName(final TableName tableName, final String regionName, final String hfileName);
-
-  public abstract boolean create(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final HRegionInfo hfileRegionInfo, final String hfileName)
-      throws IOException;
-
-  public abstract boolean createFromHFileLink(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final String hfileLinkName) throws IOException;
-
-  public abstract String createBackReferenceName(final String tableNameStr, final String regionName);
-
-  public abstract Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath);
-
-  public abstract Pair<TableName, String> parseBackReferenceName(String name);
-
-  public abstract Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath) throws IOException;
-
-  public AHFileLinkManager() {
-    super();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
deleted file mode 100644
index c97d2a0..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.fs.layout;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Throwables;
-import com.google.common.primitives.Ints;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import edu.umd.cs.findbugs.annotations.Nullable;
-
-/**
- * This class exists mostly to allow us to access the layouts statically for convenience, as
- * though the layout classes are Util files. 
- */
-public class FsLayout {
-  private static final Log LOG = LogFactory.getLog(FsLayout.class);
-  
-  public static final String FS_LAYOUT_CHOICE = "hbase.fs.layout.choose";
-  public static final String FS_LAYOUT_DETECT = "hbase.fs.layout.detect";
-  public static final String FS_LAYOUT_DETECT_STRICT = "hbase.fs.layout.detect.strict";
-  public static final String FS_LAYOUT_FILE_NAME = ".fslayout";
-  
-  // TODO: How can we avoid having a volatile variable (slightly slower reads)?
-  // TODO: Move FsLayout class/contents into FSUtils?
-  private static volatile AFsLayout fsLayout = null;
-  
-  public static final PathFilter FS_LAYOUT_PATHFILTER = new PathFilter() {
-    @Override
-    public boolean accept(Path p) {
-      return p.getName().equals(FS_LAYOUT_FILE_NAME);
-    }
-  };
-  
-  @VisibleForTesting
-  static AFsLayout getRaw() {
-    return fsLayout;
-  }
-  
-  public static AFsLayout get() {
-    AFsLayout curLayout = fsLayout;
-    if (curLayout == null) {
-      return initialize(null);
-    } else {
-      return curLayout;
-    }
-  }
-  
-  @VisibleForTesting
-  public static void reset() {
-    LOG.debug("Resetting FS layout to null");
-    fsLayout = null;
-  }
-  
-  @VisibleForTesting
-  public static void setLayoutForTesting(@NonNull AFsLayout inputLayout) {
-    LOG.debug("Setting FS layout to: " + inputLayout.getClass().getSimpleName());
-    fsLayout = inputLayout; 
-  }
-  
-  /*
-   * TODO: Should this be required to be called manually?
-   * Maybe call it manually in some processes (master/regionserver) and automatically everywhere else
-   */
-  @VisibleForTesting
-  static synchronized AFsLayout initialize(Configuration conf) {
-    try {
-      if (fsLayout != null) {
-        LOG.debug("Already initialized FS layout, not going to re-initialize");
-        return fsLayout;
-      }
-      if (conf == null) {
-        conf = HBaseConfiguration.create();
-      }
-      String choice = conf.get(FS_LAYOUT_CHOICE, null);
-      boolean autodetect = conf.getBoolean(FS_LAYOUT_DETECT, false);
-      if (choice != null && autodetect) {
-        throw new IllegalStateException("Configuration both chooses a layout and "
-            + "tries to automatically detect the layout");
-      }
-      if (choice != null) {
-        Class<?> layoutClass = Class.forName(choice);
-        Method getMethod = layoutClass.getMethod("get");
-        return (AFsLayout) getMethod.invoke(null);
-      }
-      if (autodetect) {
-        LOG.debug("Trying to detect hbase layout on filesystem");
-        FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-        Path rootDir = FSUtils.getRootDir(conf);
-        AFsLayout fsLayoutFromFile = readLayoutFile(fs, rootDir);
-        if (fsLayoutFromFile == null) {
-          if (conf.getBoolean(FS_LAYOUT_DETECT_STRICT, false)) {
-            throw new IllegalStateException("Tried to detect fs layout, but there was no layout file at the root!");
-          } else {
-            LOG.debug("Didn't find a layout file, assuming classical hbase fs layout");
-            fsLayout = StandardHBaseFsLayout.get();
-          }
-        } else {
-          LOG.info("Detected hbase fs layout: " + fsLayoutFromFile.getClass().getSimpleName());
-          fsLayout = fsLayoutFromFile;
-        }
-      } else {
-        fsLayout = StandardHBaseFsLayout.get();
-      }
-    } catch (Exception e) {
-      Throwables.propagate(e);
-    }
-    return fsLayout;
-  }
-  
-  public static AFsLayout readLayoutFile(FileSystem fs, Path rootDir) 
-        throws FileNotFoundException, IOException, ClassNotFoundException, 
-        NoSuchMethodException, SecurityException, IllegalAccessException, 
-        IllegalArgumentException, InvocationTargetException {
-    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
-    FileStatus[] statuses = fs.listStatus(rootDir, FS_LAYOUT_PATHFILTER);
-    if (statuses.length != 1) {
-      return null;
-    }
-    FileStatus stat = statuses[0];
-    int len = Ints.checkedCast(stat.getLen());
-    byte[] inputStreamBytes = new byte[len];
-    FSDataInputStream inputStream = fs.open(layoutFilePath);
-    inputStream.readFully(inputStreamBytes);
-    inputStream.close();
-    String layoutClassName = Bytes.toString(inputStreamBytes);
-    Class<?> layoutClass = Class.forName(layoutClassName);
-    Method getMethod = layoutClass.getMethod("get");
-    return (AFsLayout) getMethod.invoke(null);
-  }
-  
-  public static void writeLayoutFile(FileSystem fs, Path rootDir, AFsLayout fsLayout, boolean overwrite) 
-      throws IOException {
-    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
-    FSDataOutputStream outputStream = fs.create(layoutFilePath, overwrite);
-    try {
-      outputStream.write(Bytes.toBytes(fsLayout.getClass().getCanonicalName()));
-    } finally {
-      outputStream.close();
-    }
-  }
-  
-  public static boolean deleteLayoutFile(FileSystem fs, Path rootDir) throws IOException {
-    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
-    return fs.delete(layoutFilePath, false);
-  }
-  
-  public static HRegionFileSystemFactory getHRegionFileSystemFactory() {
-    return get().getHRegionFileSystemFactory();
-  }
-  
-  public static Path getRegionDir(Path tableDir, HRegionInfo regionInfo) {
-    return getRegionDir(tableDir, regionInfo.getEncodedName());
-  }
-
-  public static Path getRegionDir(Path tableDir, String name) {
-    return get().getRegionDir(tableDir, name);
-  }
-  
-  @Nullable
-  public static List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter) throws IOException {
-    return get().getRegionDirFileStats(fs, tableDir, filter);
-  }
-
-  public static List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException {
-    return get().getRegionDirPaths(fs, tableDir);
-  }
-  
-  public static Path getTableDirFromRegionDir(Path regionDir) {
-    return get().getTableDirFromRegionDir(regionDir);
-  }
-
-  public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir) {
-    return get().getRegionArchiveDir(rootDir, tableName, regiondir);
-  }
-  
-  public static Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
-    return get().makeHFileLinkPath(snapshotManifest, regionInfo, familyName, hfileName);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
deleted file mode 100644
index d17428a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.fs.layout;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HierarchicalHRegionFileSystemFactory;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-
-
-public class HierarchicalFsLayout extends AFsLayout {
-  private static final String OLD_REGION_NAME_PADDING = "abcdef1234abcdef1234abcdef1234ab";
-  private static final HierarchicalFsLayout LAYOUT = new HierarchicalFsLayout();
-  
-  static {
-    assert OLD_REGION_NAME_PADDING.length() == 32;
-  }
-  
-  public static HierarchicalFsLayout get() { return LAYOUT; }
-  
-  private HierarchicalFsLayout() { }
-  
-  @Override
-  public HierarchicalHRegionFileSystemFactory getHRegionFileSystemFactory() {
-    return new HierarchicalHRegionFileSystemFactory();
-  }
-  
-  @Override
-  public Path getRegionDir(Path tableDir, String name) {
-    return getHumongousRegionDir(tableDir, name);
-  }
-
-  private Path getHumongousRegionDir(final Path tabledir, final String name) {
-    if (name.length() != HRegionInfo.MD5_HEX_LENGTH) {
-      String table = tabledir.getName();
-      String namespace = tabledir.getParent().getName();
-      
-      // Meta and old root table use the old encoded name format still
-      if (!namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
-        throw new IllegalArgumentException("The region with encoded name " + name
-          + " is not a humongous region, cannot get humongous region dir from it.");
-      }
-      if (!table.equals(TableName.META_TABLE_NAME.getQualifierAsString()) && 
-          !table.equals(TableName.OLD_ROOT_TABLE_NAME.getQualifierAsString())) {
-        throw new IllegalArgumentException("The region with encoded name " + name
-          + " is not a humongous region, cannot get humongous region dir from it.");
-      }
-      
-      // Add padding to guarantee we will have enough characters
-      return new Path(new Path(tabledir, makeBucketName(name, OLD_REGION_NAME_PADDING)), name); 
-    }
-    return new Path(new Path(tabledir, makeBucketName(name, null)), name);
-  }
-  
-  private String makeBucketName(String regionName, String padding) {
-    if (padding != null) {
-      regionName = regionName + padding;
-    }
-    return regionName.substring(HRegionInfo.MD5_HEX_LENGTH
-      - HRegionFileSystem.HUMONGOUS_DIR_NAME_SIZE);
-  }
-
-  @Override
-  public List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter)
-          throws IOException {
-    FileStatus[] buckets = FSUtils.listStatus(fs, tableDir);
-    if (buckets == null) {
-      return null;
-    }
-    List<FileStatus> stats = new ArrayList<FileStatus>();
-    for (FileStatus bucket : buckets) {
-      FileStatus[] regionDirs = null;
-      if (filter != null) {
-        regionDirs = fs.listStatus(bucket.getPath(), filter);
-      } else {
-        regionDirs = fs.listStatus(bucket.getPath());
-      }
-      for (FileStatus regionDir : regionDirs) {
-        stats.add(regionDir);
-      }
-    }
-    if (stats.size() == 0) {
-      return null;
-    }
-    return stats;
-  }
-
-  /**
-   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
-   * .tableinfo
-   * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
-   * @return List of paths to valid region directories in table dir.
-   * @throws IOException
-   */
-  @Override
-  public List<Path> getRegionDirPaths(final FileSystem fs, final Path tableDir) throws IOException {
-    // assumes we are in a table dir.
-    FileStatus[] rds = fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs));
-    List<Path> regionDirs = new ArrayList<Path>();
-    for (FileStatus rdfs : rds) {
-      // get all region dirs from bucket dir
-      FileStatus[] bucket_rds = fs.listStatus(rdfs.getPath(),
-          new FSUtils.RegionDirFilter(fs));
-      for (FileStatus bucket_rdfs : bucket_rds) {
-        regionDirs.add(bucket_rdfs.getPath());
-      }
-    }
-    return regionDirs;
-  }
-  
-  @Override
-  public Path getTableDirFromRegionDir(Path regionDir) {
-    return regionDir.getParent().getParent();
-  }
-
-  /**
-   * Get the archive directory for a given region under the specified table
-   * @param tableName the table name. Cannot be null.
-   * @param regiondir the path to the region directory. Cannot be null.
-   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
-   *         should not be archived
-   */
-  @Override
-  public Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName,
-                                         Path regiondir) {
-    // get the archive directory for a table
-    Path archiveDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);
-  
-    // then add on the region path under the archive
-    String encodedRegionName = regiondir.getName();
-    String parentName = regiondir.getParent().getName();
-  
-    return new Path(archiveDir, new Path(parentName, encodedRegionName));
-  }
-
-  @Override
-  public Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
-    return new Path(new Path(getHumongousRegionDir(snapshotManifest.getSnapshotDir(),
-      regionInfo.getEncodedName()), familyName), hfileName);
-  }
-}
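
For reference, the bucketing scheme deleted above maps each region into one of
16^4 = 65536 bucket directories by taking the last HUMONGOUS_DIR_NAME_SIZE
characters of the (possibly padded) encoded region name. A minimal standalone
sketch, assuming MD5_HEX_LENGTH is 32 as in HRegionInfo (the class and method
names here are hypothetical, for illustration only):

    public final class BucketingSketch {
      static final int MD5_HEX_LENGTH = 32;   // length of an encoded region name
      static final int BUCKET_NAME_SIZE = 4;  // HUMONGOUS_DIR_NAME_SIZE above

      // Bucket = the last 4 hex chars of the (padded) encoded name.
      static String bucketFor(String encodedName, String padding) {
        String name = (padding == null) ? encodedName : encodedName + padding;
        return name.substring(MD5_HEX_LENGTH - BUCKET_NAME_SIZE);
      }

      public static void main(String[] args) {
        // The region dir becomes <tableDir>/<bucket>/<encodedName>.
        System.out.println(bucketFor("0123456789abcdef0123456789abcdef", null)); // cdef
      }
    }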

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
deleted file mode 100644
index 98edb78..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.fs.layout;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-
-
-public class StandardHBaseFsLayout extends AFsLayout {
-  private static final StandardHBaseFsLayout LAYOUT = new StandardHBaseFsLayout();
-  
-  public static StandardHBaseFsLayout get() { return LAYOUT; }
-  
-  private StandardHBaseFsLayout() { }
-  
-  @Override
-  public HRegionFileSystemFactory getHRegionFileSystemFactory() {
-    return new HRegionFileSystemFactory();
-  }
-  
-  @Override
-  public Path getRegionDir(Path tableDir, String name) {
-    return new Path(tableDir, name);
-  }
-
-  @Override
-  public List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter)
-          throws IOException {
-    FileStatus[] rds = FSUtils.listStatus(fs, tableDir, filter);
-    if (rds == null) {
-      return null;
-    }
-    List<FileStatus> regionStatus = new ArrayList<FileStatus>(rds.length);
-    for (FileStatus rdfs : rds) {
-      regionStatus.add(rdfs);
-    }
-    return regionStatus;
-  }
-
-  /**
-   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
-   * .tableinfo
-   * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
-   * @return List of paths to valid region directories in table dir.
-   * @throws IOException
-   */
-  @Override
-  public List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException {
-    // assumes we are in a table dir.
-    FileStatus[] rds = fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs));
-    List<Path> regionDirs = new ArrayList<Path>();
-    for (FileStatus rdfs : rds) {
-       regionDirs.add(rdfs.getPath());
-    }
-    return regionDirs;
-  }
-  
-  @Override
-  public Path getTableDirFromRegionDir(Path regionDir) {
-    return regionDir.getParent();
-  }
-
-  /**
-   * Get the archive directory for a given region under the specified table
-   * @param tableName the table name. Cannot be null.
-   * @param regiondir the path to the region directory. Cannot be null.
-   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
-   *         should not be archived
-   */
-  @Override
-  public Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName,
-                                         Path regiondir) {
-    // get the archive directory for a table
-    Path archiveDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);
-    // then add on the region path under the archive
-    String encodedRegionName = regiondir.getName();
-    return new Path(archiveDir, encodedRegionName);
-  }
-
-  @Override
-  public Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
-    return new Path(new Path(new Path(snapshotManifest.getSnapshotDir(),
-      regionInfo.getEncodedName()), familyName), hfileName);
-  }
-}
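
Side by side, the two deleted layouts differ in a single path component. A
small illustrative sketch (the paths are made up; only the shapes matter):

    import org.apache.hadoop.fs.Path;

    public class LayoutShapesSketch {
      public static void main(String[] args) {
        Path tableDir = new Path("/hbase/data/default/t1");
        String encoded = "0123456789abcdef0123456789abcdef";

        // Standard layout: <tableDir>/<encodedName>
        Path flat = new Path(tableDir, encoded);

        // Hierarchical layout: <tableDir>/<bucket>/<encodedName>
        Path hier = new Path(new Path(tableDir, encoded.substring(28)), encoded);

        System.out.println(flat); // .../t1/0123456789abcdef0123456789abcdef
        System.out.println(hier); // .../t1/cdef/0123456789abcdef0123456789abcdef
      }
    }

The archive layouts mirror this: the hierarchical getRegionArchiveDir() keeps
the bucket component under the table's archive path, the standard one does not.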

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index d18dada..4ad310d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -248,7 +248,7 @@ public class HFile {
     protected FileSystem fs;
     protected Path path;
     protected FSDataOutputStream ostream;
-    protected CellComparator comparator = 
+    protected CellComparator comparator =
         CellComparator.COMPARATOR;
     protected InetSocketAddress[] favoredNodes;
     private HFileContext fileContext;
@@ -830,33 +830,6 @@ public class HFile {
   }
 
   /**
-   * Returns all HFiles belonging to the given region directory. Could return an
-   * empty list.
-   *
-   * @param fs  The file system reference.
-   * @param regionDir  The region directory to scan.
-   * @return The list of files found.
-   * @throws IOException When scanning the files fails.
-   */
-  static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
-      throws IOException {
-    List<Path> regionHFiles = new ArrayList<Path>();
-    PathFilter dirFilter = new FSUtils.DirFilter(fs);
-    FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
-    for(FileStatus dir : familyDirs) {
-      FileStatus[] files = fs.listStatus(dir.getPath());
-      for (FileStatus file : files) {
-        if (!file.isDirectory() &&
-            (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) &&
-            (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) {
-          regionHFiles.add(file.getPath());
-        }
-      }
-    }
-    return regionHFiles;
-  }
-
-  /**
    * Checks the given {@link HFile} format version, and throws an exception if
    * invalid. Note that if the version number comes from an input file and has
    * not been verified, the caller needs to re-throw an {@link IOException} to
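
With the static helper above removed, callers enumerate store files through
the region filesystem view instead. A fragment mirroring the HFilePrettyPrinter
change below (it reuses the conf and region info already in scope at the old
call sites; the org.apache.hadoop.hbase.fs.HRegionFileSystem signatures are
those of this experimental patch, not a released API):

    HRegionFileSystem rfs = HRegionFileSystem.open(conf, regionInfo, true);
    List<Path> regionHFiles = new ArrayList<Path>();
    for (String family : rfs.getFamilies()) {
      for (StoreFileInfo storeFile : rfs.getStoreFiles(family)) {
        regionHFiles.add(storeFile.getPath());
      }
    }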

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 757ad1f..0067de6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -24,6 +24,7 @@ import java.io.DataInput;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -56,10 +57,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
@@ -181,31 +183,28 @@ public class HFilePrettyPrinter extends Configured implements Tool {
       String regionName = cmd.getOptionValue("r");
       byte[] rn = Bytes.toBytes(regionName);
       HRegionInfo hri = HRegionInfo.parseRegionInfoFromRegionName(rn);
-      Path rootDir = FSUtils.getRootDir(getConf());
-      Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(
-        hri.getTable().getNameAsString()));
-      FileSystem fs = FileSystem.get(getConf());
-      Path regionDir = HRegionFileSystem.create(getConf(), fs, tableDir, hri).getRegionDir();
-      if (verbose)
-        System.out.println("region dir -> " + regionDir);
-      List<Path> regionFiles = HFile.getStoreFiles(fs,
-          regionDir);
-      if (verbose)
-        System.out.println("Number of region files found -> "
-            + regionFiles.size());
-      if (verbose) {
+      HRegionFileSystem rfs = HRegionFileSystem.open(getConf(), hri, true);
+      printIfVerbose("region dir -> " + rfs);
+      for (String family: rfs.getFamilies()) {
+        Collection<StoreFileInfo> storeFiles = rfs.getStoreFiles(family);
+        printIfVerbose("Number of region files found -> " + storeFiles.size());
         int i = 1;
-        for (Path p : regionFiles) {
-          if (verbose)
-            System.out.println("Found file[" + i++ + "] -> " + p);
+        for (StoreFileInfo storeFile : storeFiles) {
+          printIfVerbose("Found file[%d] -> %s", i++, storeFile.getPath());
+          files.add(storeFile.getPath());
         }
       }
-      files.addAll(regionFiles);
     }
 
     return true;
   }
 
+  private void printIfVerbose(String format, Object... args) {
+    if (verbose) {
+      System.out.println(String.format(format, args));
+    }
+  }
+
   /**
    * Runs the command-line pretty-printer, and returns the desired command
    * exit code (zero for success, non-zero for failure).
@@ -245,8 +244,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
   }
 
   private void processFile(Path file) throws IOException {
-    if (verbose)
-      System.out.println("Scanning -> " + file);
+    printIfVerbose("Scanning -> " + file);
     FileSystem fs = file.getFileSystem(getConf());
     if (!fs.exists(file)) {
       System.err.println("ERROR, file doesnt exist: " + file);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 9d78c1b..86bacb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -44,7 +44,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.RegionDoesNotExistException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -188,8 +189,7 @@ public class CatalogJanitor extends ScheduledChore {
     HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
     HRegionFileSystem regionFs = null;
     try {
-      regionFs = HRegionFileSystem.openRegionFromFileSystem(
-          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
+      regionFs = HRegionFileSystem.open(this.services.getConfiguration(), mergedRegion, true);
     } catch (IOException e) {
       LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
     }
@@ -368,25 +368,14 @@ public class CatalogJanitor extends ScheduledChore {
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
     Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
 
-    HRegionFileSystem hrfs = HRegionFileSystem.create(
-      this.services.getConfiguration(), fs, tabledir, daughter);
+    HRegionFileSystem hrfs = HRegionFileSystem.create(this.services.getConfiguration(), daughter);
     Path daughterRegionDir = hrfs.getRegionDir();
 
-    HRegionFileSystem regionFs = null;
-
-    try {
-      if (!FSUtils.isExists(fs, daughterRegionDir)) {
-        return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
-      }
-    } catch (IOException ioe) {
-      LOG.warn("Error trying to determine if daughter region exists, " +
-               "assuming exists and has references", ioe);
-      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
-    }
-
+    HRegionFileSystem regionFs;
     try {
-      regionFs = HRegionFileSystem.openRegionFromFileSystem(
-          this.services.getConfiguration(), fs, tabledir, daughter, true);
+      regionFs = HRegionFileSystem.open(this.services.getConfiguration(), daughter, true);
+    } catch (RegionDoesNotExistException e) {
+      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     } catch (IOException e) {
       LOG.warn("Error trying to determine referenced files from : " + daughter.getEncodedName()
           + ", to: " + parent.getEncodedName() + " assuming has references", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 24f3071..408199d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -51,10 +51,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -504,7 +503,7 @@ public class MasterFileSystem {
 
     return rd;
   }
-  
+
   /**
    * Checks if meta region exists
    *
@@ -625,7 +624,7 @@ public class MasterFileSystem {
 
     Path regionDir = FsLayout.getRegionDir(tableDir, region);
     Path familyDir = new Path(regionDir, Bytes.toString(familyName));
-    
+
     if (fs.delete(familyDir, true) == false) {
       if (fs.exists(familyDir)) {
         throw new IOException("Could not delete family "
@@ -702,38 +701,7 @@ public class MasterFileSystem {
     this.services.getTableDescriptors().add(htd);
     return htd;
   }
-  
-  // TODO: Can't get rid of this entirely because the caller of this method
-  // (TestSplitTransactionOnCluster.testSSHCleanupDaughterRegionsOfABortedSplit)
-  // is testing a case where the FS does not agree with hbase:meta.
-  // At best we can assert the number of region dirs in the MFS without
-  // exposing what they actually are.
-  public List<Path> getRegionDirs(TableName tableName) throws IOException {
-    FileSystem fs = getFileSystem();
-    Path rootDir = FSUtils.getRootDir(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
-    return FsLayout.getRegionDirPaths(fs, tableDir);
-  }
-  
-  // Only returns region filesystems for regions in meta
-  // Will ignore anything on filesystem
-  public List<HRegionFileSystem> getRegionFileSystems(Configuration conf, 
-    Connection connection, TableName tableName) throws IOException {
-    
-    FileSystem fs = getFileSystem();
-    Path rootDir = FSUtils.getRootDir(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
-    
-    List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
-    
-    List<HRegionFileSystem> results = new ArrayList<HRegionFileSystem>();
-    for (HRegionInfo regionInfo : regionInfos) {
-      HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, regionInfo);
-      results.add(hrfs);
-    }
-    return results;
-  }
-  
+
   /**
    * The function is used in SSH to set recovery mode based on configuration after all outstanding
    * log split tasks drained.

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index bd672dc..ded09bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -646,7 +646,7 @@ public class RegionStates {
         if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) ==
             null) {
           regionOffline(hri);
-          HRegionFileSystem.deleteRegionDir(server.getConfiguration(), hri);
+          HRegionFileSystem.destroy(server.getConfiguration(), hri);
         }
       } catch (IOException e) {
         LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 9d54b00..06905b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;


[2/3] hbase git commit: Incomplete experiment (it does not compile and has missing code)

Posted by mb...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b33c853..b87c8cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.FilterWrapper;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -593,35 +594,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * extensions.  Instances of HRegion should be instantiated with the
    * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method.
    *
-   * @param tableDir qualified path of directory where region should be located,
-   * usually the table directory.
-   * @param wal The WAL is the outbound log for any updates to the HRegion
-   * The wal file is a logfile from the previous execution that's
-   * custom-computed for this HRegion. The HRegionServer computes and sorts the
-   * appropriate wal info for this HRegion. If there is a previous wal file
-   * (implying that the HRegion has been written-to before), then read it from
-   * the supplied path.
-   * @param fs is the filesystem.
-   * @param confParam is global configuration settings.
-   * @param regionInfo HRegionInfo that describes the region
-   * @param htd the table descriptor
-   * @param rsServices reference to {@link RegionServerServices} or null
-   * @deprecated Use other constructors.
-   */
-  @Deprecated
-  public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
-      final Configuration confParam, final HRegionInfo regionInfo,
-      final HTableDescriptor htd, final RegionServerServices rsServices) {
-    this(HRegionFileSystem.create(confParam, fs, tableDir, regionInfo),
-      wal, confParam, htd, rsServices);
-  }
-
-  /**
-   * HRegion constructor. This constructor should only be used for testing and
-   * extensions.  Instances of HRegion should be instantiated with the
-   * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method.
-   *
    * @param fs is the filesystem.
    * @param wal The WAL is the outbound log for any updates to the HRegion
    * The wal file is a logfile from the previous execution that's
@@ -794,7 +766,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     // Write HRI to a file in case we need to recover hbase:meta
     status.setStatus("Writing region info on filesystem");
-    fs.checkRegionInfoOnFilesystem();
+    fs.writeRecoveryCheckPoint();
 
     // Initialize all the HStores
     status.setStatus("Initializing all the Stores");
@@ -808,16 +780,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     if (this.writestate.writesEnabled) {
       // Remove temporary data left over from old regions
       status.setStatus("Cleaning up temporary data from old regions");
-      fs.cleanupTempDir();
-    }
-
-    if (this.writestate.writesEnabled) {
-      status.setStatus("Cleaning up detritus from prior splits");
-      // Get rid of any splits or merges that were lost in-progress.  Clean out
-      // these directories here on open.  We may be opening a region that was
-      // being split but we crashed in the middle of it all.
-      fs.cleanupAnySplitDetritus();
-      fs.cleanupMergesDir();
+      fs.cleanup();
     }
 
     // Initialize split policy
@@ -1041,7 +1004,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
     FileSystem fs = tablePath.getFileSystem(conf);
 
-    HRegionFileSystem regionFs = HRegionFileSystem.create(conf, fs, tablePath, regionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.create(conf, regionInfo);
     for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
       Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
       if (storeFiles == null) continue;
@@ -5868,20 +5831,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @param htd the table descriptor
    * @return the new instance
    */
-  static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs,
-      Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd,
-      RegionServerServices rsServices) {
+  static HRegion newHRegion(Configuration conf, HTableDescriptor htd,
+      HRegionInfo regionInfo, WAL wal, RegionServerServices rsServices) {
     try {
       @SuppressWarnings("unchecked")
       Class<? extends HRegion> regionClass =
           (Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
 
       Constructor<? extends HRegion> c =
-          regionClass.getConstructor(Path.class, WAL.class, FileSystem.class,
-              Configuration.class, HRegionInfo.class, HTableDescriptor.class,
-              RegionServerServices.class);
+          regionClass.getConstructor(Configuration.class, HTableDescriptor.class,
+              HRegionInfo.class, WAL.class, RegionServerServices.class);
 
-      return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices);
+      return c.newInstance(conf, htd, regionInfo, wal, rsServices);
     } catch (Throwable e) {
       // todo: what should I throw here?
       throw new IllegalStateException("Could not instantiate a region instance.", e);
@@ -5905,11 +5866,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     LOG.info("creating HRegion " + info.getTable().getNameAsString()
         + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
         " Table name == " + info.getTable().getNameAsString());
-    FileSystem fs = FileSystem.get(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
-    HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
-    HRegion region = HRegion.newHRegion(tableDir,
-        wal, fs, conf, info, hTableDescriptor, null);
+    HRegionFileSystem.create(conf, info);
+    HRegion region = HRegion.newHRegion(conf, hTableDescriptor, info, wal, null);
     if (initialize) {
       // If initializing, set the sequenceId. It is also required by WALPerformanceEvaluation when
       // verifying the WALEdits.
@@ -6086,7 +6044,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
     }
-    HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
+    HRegion r = HRegion.newHRegion(conf, htd, info, wal, rsServices);
     return r.openHRegion(reporter);
   }
 
@@ -6101,8 +6059,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
       throws IOException {
     HRegionFileSystem regionFs = other.getRegionFileSystem();
-    HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(),
-        other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
+    HRegion r = newHRegion(other.baseConf, other.getTableDesc(),
+        other.getRegionInfo(), other.getWAL(), null);
     return r.openHRegion(reporter);
   }
 
@@ -6150,9 +6108,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       LOG.debug("HRegion.Warming up region: " + info);
     }
 
-    Path rootDir = FSUtils.getRootDir(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
-
     FileSystem fs = null;
     if (rsServices != null) {
       fs = rsServices.getFileSystem();
@@ -6161,7 +6116,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       fs = FileSystem.get(conf);
     }
 
-    HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
+    HRegion r = HRegion.newHRegion(conf, htd, info, wal, rsServices);
     r.initializeWarmup(reporter);
     r.close();
   }
@@ -6195,8 +6150,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     fs.commitDaughterRegion(hri);
 
     // Create the daughter HRegion instance
-    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(),
-        this.getBaseConf(), hri, this.getTableDesc(), rsServices);
+    HRegion r = HRegion.newHRegion(this.getBaseConf(), this.getTableDesc(),
+        hri, this.getWAL(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
     return r;
@@ -6210,14 +6165,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    */
   HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo,
       final HRegion region_b) throws IOException {
-    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(),
-        fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
-        this.getTableDesc(), this.rsServices);
-    r.readRequestsCount.set(this.getReadRequestsCount()
-        + region_b.getReadRequestsCount());
-    r.writeRequestsCount.set(this.getWriteRequestsCount()
-
-        + region_b.getWriteRequestsCount());
+    HRegion r = HRegion.newHRegion(this.getBaseConf(), this.getTableDesc(),
+        mergedRegionInfo, this.getWAL(), this.rsServices);
+    r.readRequestsCount.set(this.getReadRequestsCount() + region_b.getReadRequestsCount());
+    r.writeRequestsCount.set(this.getWriteRequestsCount() + region_b.getWriteRequestsCount());
     this.fs.commitMergedRegion(mergedRegionInfo);
     return r;
   }
@@ -6248,7 +6199,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       Bytes.toBytes(HConstants.META_VERSION)));
     meta.put(row, HConstants.CATALOG_FAMILY, cells);
   }
-  
+
   /**
    * Determines if the specified row is within the row range specified by the
    * specified HRegionInfo
@@ -7342,9 +7293,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) {
       final WAL wal = walFactory.getMetaWAL(
           HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
-      region = HRegion.newHRegion(p, wal, fs, c,
-        HRegionInfo.FIRST_META_REGIONINFO,
-          fst.get(TableName.META_TABLE_NAME), null);
+      region = HRegion.newHRegion(c, fst.get(TableName.META_TABLE_NAME),
+        HRegionInfo.FIRST_META_REGIONINFO, wal, null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }
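
One consequence of the newHRegion() change above: any custom class plugged in
via HConstants.REGION_IMPL must now expose the reordered constructor that the
reflective lookup asks for. A hypothetical subclass (this assumes HRegion
itself gains the matching five-argument constructor, which this incomplete
patch implies but does not show):

    public class MyRegion extends HRegion {
      public MyRegion(Configuration conf, HTableDescriptor htd,
          HRegionInfo regionInfo, WAL wal, RegionServerServices rsServices) {
        super(conf, htd, regionInfo, wal, rsServices);
      }
    }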

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
deleted file mode 100644
index 9aeb5cd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ /dev/null
@@ -1,1216 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * View to an on-disk Region.
- * Provides the set of methods necessary to interact with the on-disk region data.
- */
-@InterfaceAudience.Private
-public class HRegionFileSystem {
-  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);
-
-  /** Name of the region info file that resides just under the region directory. */
-  public final static String REGION_INFO_FILE = ".regioninfo";
-
-  /** Temporary subdirectory of the region directory used for merges. */
-  public static final String REGION_MERGES_DIR = ".merges";
-
-  /** Temporary subdirectory of the region directory used for splits. */
-  public static final String REGION_SPLITS_DIR = ".splits";
-
-  /** Temporary subdirectory of the region directory used for compaction output. */
-  protected static final String REGION_TEMP_DIR = ".tmp";
-
-  protected final HRegionInfo regionInfo;
-  //regionInfo for interacting with FS (getting encodedName, etc)
-  protected final HRegionInfo regionInfoForFs;
-  protected final Configuration conf;
-  protected final Path tableDir;
-  protected final FileSystem fs;
-  
-  /** Number of characters for DIR name, 4 characters for 16^4 = 65536 buckets. */
-  public static final int HUMONGOUS_DIR_NAME_SIZE = 4;
-
-  /**
-   * In order to handle NN connectivity hiccups, one needs to retry non-idempotent operations at the
-   * client level.
-   */
-  protected final int hdfsClientRetriesNumber;
-  protected final int baseSleepBeforeRetries;
-  protected static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
-  protected static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
-
-  /**
-   * Use the static factory methods on this class for construction, unless you are an
-   * HRegionFileSystem subclass constructor or the HRegionFileSystemFactory.
-   * 
-   * Create a view to the on-disk region
-   * 
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} that contains the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region
-   */
-  protected HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    this.fs = fs;
-    this.conf = conf;
-    this.tableDir = tableDir;
-    this.regionInfo = regionInfo;
-    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
-    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
- }
-
-  public static HRegionFileSystem create(final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    return FsLayout.getHRegionFileSystemFactory().create(conf, fs, tableDir, regionInfo);
-  }
-  
-  /** @return the underlying {@link FileSystem} */
-  public FileSystem getFileSystem() {
-    return this.fs;
-  }
-
-  /** @return the {@link HRegionInfo} that describe this on-disk region view */
-  public HRegionInfo getRegionInfo() {
-    return this.regionInfo;
-  }
-
-  public HRegionInfo getRegionInfoForFS() {
-    return this.regionInfoForFs;
-  }
-
-  /** @return {@link Path} to the region's root directory. */
-  public Path getTableDir() {
-    return this.tableDir;
-  }
-
-  /** @return {@link Path} to the region directory. */
-  public Path getRegionDir() {
-    return FsLayout.getRegionDir(this.tableDir, this.regionInfoForFs);
-  }
-    
-  /** @return {@link Path} to the daughter region provided */
-  public Path getDaughterRegionDir(HRegionInfo daughter) {
-    return FsLayout.getRegionDir(this.tableDir, daughter);
-  }
-  
-  // ===========================================================================
-  //  Temp Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the region's temp directory, used for file creations */
-  Path getTempDir() {
-    return new Path(getRegionDir(), REGION_TEMP_DIR);
-  }
-
-  /**
-   * Clean up any temp detritus that may have been left around from previous operation attempts.
-   */
-  void cleanupTempDir() throws IOException {
-    deleteDir(getTempDir());
-  }
-
-  // ===========================================================================
-  //  Store/StoreFile Helpers
-  // ===========================================================================
-  /**
-   * Returns the directory path of the specified family
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   */
-  public Path getStoreDir(final String familyName) {
-    return new Path(this.getRegionDir(), familyName);
-  }
-
-  /**
-   * Create the store directory for the specified family name
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   * @throws IOException if the directory creation fails.
-   */
-  Path createStoreDir(final String familyName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating "+storeDir);
-    return storeDir;
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
-    return getStoreFiles(Bytes.toString(familyName));
-  }
-
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
-    return getStoreFiles(familyName, true);
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
-    if (files == null) {
-      LOG.debug("No StoreFiles for: " + familyDir);
-      return null;
-    }
-
-    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
-    for (FileStatus status: files) {
-      if (validate && !StoreFileInfo.isValid(status)) {
-        LOG.warn("Invalid StoreFile: " + status.getPath());
-        continue;
-      }
-      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-        regionInfoForFs, familyName, status.getPath());
-      storeFiles.add(info);
-
-    }
-    return storeFiles;
-  }
-
-  /**
-   * Return Qualified Path of the specified family/file
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The qualified Path for the specified family/file
-   */
-  Path getStoreFilePath(final String familyName, final String fileName) {
-    Path familyDir = getStoreDir(familyName);
-    return new Path(familyDir, fileName).makeQualified(this.fs);
-  }
-
-  /**
-   * Return the store file information of the specified family/file.
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The {@link StoreFileInfo} for the specified family/file
-   */
-  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-      regionInfoForFs, familyName, new Path(familyDir, fileName));
-  }
-  
-  void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter)
-      throws IOException {
-    Path splitsDir = getSplitsDir(daughter);
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(getFileSystem(),
-          splitsDir)) {
-      throw new IOException("Failing split. Expected reference file count isn't equal.");
-    }
-  }
-
-  void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, HRegionInfo daughter)
-      throws IOException {
-    Path daughterRegionDir = FsLayout.getRegionDir(getTableDir(), daughter);
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(getFileSystem(),
-          daughterRegionDir)) {
-      throw new IOException("Failing split. Expected reference file count isn't equal.");
-    }
-  }
-  
-  /**
-   * Returns true if the specified family has reference files
-   * @param familyName Column Family Name
-   * @return true if family contains reference files
-   * @throws IOException
-   */
-  public boolean hasReferences(final String familyName) throws IOException {
-    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
-        new FSUtils.ReferenceFileFilter(fs));
-    return files != null && files.length > 0;
-  }
-
-  /**
-   * Check whether region has Reference file
-   * @param htd table descriptor of the region
-   * @return true if region has reference file
-   * @throws IOException
-   */
-  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
-    for (HColumnDescriptor family : htd.getFamilies()) {
-      if (hasReferences(family.getNameAsString())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @return the set of families present on disk
-   * @throws IOException
-   */
-  public Collection<String> getFamilies() throws IOException {
-    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
-    if (fds == null) return null;
-
-    ArrayList<String> families = new ArrayList<String>(fds.length);
-    for (FileStatus status: fds) {
-      families.add(status.getPath().getName());
-    }
-
-    return families;
-  }
-
-  /**
-   * Remove the region family from disk, archiving the store files.
-   * @param familyName Column Family Name
-   * @throws IOException if an error occurs during the archiving
-   */
-  public void deleteFamily(final String familyName) throws IOException {
-    // archive family store files
-    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
-
-    // delete the family folder
-    Path familyDir = getStoreDir(familyName);
-    if(fs.exists(familyDir) && !deleteDir(familyDir))
-      throw new IOException("Could not delete family " + familyName
-          + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString() + "("
-          + regionInfoForFs.getEncodedName() + ")");
-  }
-
-  /**
-   * Generate a unique file name, used by createTempName() and commitStoreFile()
-   * @param suffix extra information to append to the generated name
-   * @return Unique file name
-   */
-  protected static String generateUniqueName(final String suffix) {
-    String name = UUID.randomUUID().toString().replaceAll("-", "");
-    if (suffix != null) name += suffix;
-    return name;
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName() {
-    return createTempName(null);
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @param suffix extra information to append to the generated name
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName(final String suffix) {
-    return new Path(getTempDir(), generateUniqueName(suffix));
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
-    return commitStoreFile(familyName, buildPath, -1, false);
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @param seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
-   * @param generateNewName False if you want to keep the buildPath name
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  protected Path commitStoreFile(final String familyName, final Path buildPath,
-      final long seqNum, final boolean generateNewName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating " + storeDir);
-
-    String name = buildPath.getName();
-    if (generateNewName) {
-      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
-    }
-    Path dstPath = new Path(storeDir, name);
-    if (!fs.exists(buildPath)) {
-      throw new FileNotFoundException(buildPath.toString());
-    }
-    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
-    // buildPath exists, therefore not doing an exists() check.
-    if (!rename(buildPath, dstPath)) {
-      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
-    }
-    return dstPath;
-  }
-
-
-  /**
-   * Moves multiple store files to the relative region's family store directory.
-   * @param storeFiles list of store files divided by family
-   * @throws IOException
-   */
-  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
-    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
-      String familyName = Bytes.toString(es.getKey());
-      for (StoreFile sf: es.getValue()) {
-        commitStoreFile(familyName, sf.getPath());
-      }
-    }
-  }
-
-  /**
-   * Archives the specified store file from the specified family.
-   * @param familyName Family that contains the store files
-   * @param filePath {@link Path} to the store file to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFile(final String familyName, final Path filePath)
-      throws IOException {
-    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), filePath);
-  }
-
-  /**
-   * Closes and archives the specified store files from the specified family.
-   * @param familyName Family that contains the store files
-   * @param storeFiles set of store files to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
-      throws IOException {
-    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), storeFiles);
-  }
-
-  /**
-   * Bulk load: Add a specified store file to the specified family.
-   * If the source file is on the same file-system it is moved from the
-   * source location to the destination location; otherwise it is copied over.
-   *
-   * @param familyName Family that will gain the file
-   * @param srcPath {@link Path} to the file to import
-   * @param seqNum Bulk Load sequence number
-   * @return The destination {@link Path} of the bulk loaded file
-   * @throws IOException
-   */
-  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
-      throws IOException {
-    // Copy the file if it's on another filesystem
-    FileSystem srcFs = srcPath.getFileSystem(conf);
-    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
-
-    // We can't compare FileSystem instances as equals() includes UGI instance
-    // as part of the comparison and won't work when doing SecureBulkLoad
-    // TODO deal with viewFS
-    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
-      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
-          "the destination store. Copying file over to destination filesystem.");
-      Path tmpPath = createTempName();
-      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
-      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
-      srcPath = tmpPath;
-    }
-
-    return commitStoreFile(familyName, srcPath, seqNum, true);
-  }
-
-  // ===========================================================================
-  //  Splits Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the temp directory used during split operations */
-  Path getSplitsDir() {
-    return new Path(getRegionDir(), REGION_SPLITS_DIR);
-  }
-
-  Path getSplitsDir(final HRegionInfo hri) {
-    return new Path(getSplitsDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous split attempts.
-   */
-  void cleanupSplitsDir() throws IOException {
-    deleteDir(getSplitsDir());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous
-   * split attempts.
-   * Call this method on initial region deploy.
-   * @throws IOException
-   */
-  void cleanupAnySplitDetritus() throws IOException {
-    Path splitdir = this.getSplitsDir();
-    if (!fs.exists(splitdir)) return;
-    // Look at the splitdir.  It could have the encoded names of the daughter
-    // regions we tried to make.  See if the daughter regions actually got made
-    // out under the tabledir.  If they are still here under splitdir, the split did
-    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
-    // where we successfully created daughter a but the regionserver crashed during
-    // the creation of region b.  In this case, there'll be an orphan daughter
-    // dir in the filesystem.  TODO: Fix.
-    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
-    if (daughters != null) {
-      for (FileStatus daughter: daughters) {
-        Path daughterDir = FsLayout.getRegionDir(getTableDir(), daughter.getPath().getName());
-        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
-          throw new IOException("Failed delete of " + daughterDir);
-        }
-      }
-    }
-    cleanupSplitsDir();
-    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
-  }
-
-  /**
-   * Remove daughter region
-   * @param regionInfo daughter {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo);
-    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Commit a daughter region, moving it from the split temporary directory
-   * to the proper location in the filesystem.
-   *
-   * @param regionInfo                 daughter {@link org.apache.hadoop.hbase.HRegionInfo}
-   * @throws IOException
-   */
-  Path commitDaughterRegion(final HRegionInfo regionInfo)
-      throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo);
-    Path daughterTmpDir = this.getSplitsDir(regionInfo);
-    
-    // Hierarchical-layout example: the daughter is staged under the parent,
-    // e.g. /table/bucket2/parent/.splits/daughter/.regioninfo, and its final
-    // region dir may land under a different bucket, e.g. /table/bucket1.
-
-    if (fs.exists(daughterTmpDir)) {
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
-      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-      
-      // Move the daughter region dir to its final place
-      moveNewRegionFromTmpDirToRegionDir(daughterTmpDir, regionDir);
-    }
-
-    return regionDir;
-  }
-  
-  /**
-   * Finalize the creation of a new region by moving it from a temporary staging
-   * directory to its final region directory in the table directory
-   * 
-   * Example: Moving /table/parent/.splits/daughter to /table/daughter for a new
-   * daughter region created from a region split
-   * 
-   * @param source  temporary staging directory
-   * @param dest    final region directory
-   * @throws IOException 
-   */
-  void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws IOException {
-    if (!rename(source, dest)) {
-      throw new IOException("Unable to rename " + source + " to " + dest);
-    }
-  }
-
-  /**
-   * Create the region splits directory.
-   */
-  void createSplitsDir() throws IOException {
-    Path splitdir = getSplitsDir();
-    if (fs.exists(splitdir)) {
-      LOG.info("The " + splitdir + " directory exists; deleting it so it can be recreated");
-      if (!deleteDir(splitdir)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating it again.");
-      }
-    }
-    // splitdir doesn't exist now; no need for an exists() check before creating it.
-    if (!createDir(splitdir)) {
-      throw new IOException("Failed create of " + splitdir);
-    }
-  }
-
-  /**
-   * Write out a split reference. Package local so it doesn't leak out of
-   * regionserver.
-   * @param hri {@link HRegionInfo} of the destination
-   * @param familyName Column Family Name
-   * @param f File to split.
-   * @param splitRow Split Row
-   * @param top True if we are referring to the top half of the hfile.
-   * @param splitPolicy
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
-      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy)
-          throws IOException {
-
-    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
-      // Check whether the split row lies in the range of the store file
-      // If it is outside the range, return directly.
-      try {
-        if (top) {
-          //check if larger than last key.
-          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
-          byte[] lastKey = f.createReader().getLastKey();
-          // A null lastKey means the storefile is empty.
-          if (lastKey == null) {
-            return null;
-          }
-          if (f.getReader().getComparator().compare(splitKey, lastKey, 0, lastKey.length) > 0) {
-            return null;
-          }
-        } else {
-          //check if smaller than first key
-          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
-          Cell firstKey = f.createReader().getFirstKey();
-          // A null firstKey means the storefile is empty.
-          if (firstKey == null) {
-            return null;
-          }
-          if (f.getReader().getComparator().compare(splitKey, firstKey) < 0) {
-            return null;
-          }
-        }
-      } finally {
-        f.closeReader(true);
-      }
-    }
-
-    Path splitDir = new Path(getSplitsDir(hri), familyName);
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
-    // Add the referred-to region's name as a dot-separated suffix.
-    // See the REF_NAME_REGEX regex above.  The referred-to region's name is
-    // up in the path of the passed-in <code>f</code> -- the parent dir is the family,
-    // and the directory above that is the region name.
-    String parentRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    return createReferenceFile(r, f, parentRegionName, splitDir);
-  }
-  
-  Path createReferenceFile(Reference r, StoreFile f, String originalRegionName, Path targetDir) throws IOException {
-    Path p = new Path(targetDir, f.getPath().getName() + "." + originalRegionName);
-    return r.write(fs, p);
-  }
-
-  // ===========================================================================
-  //  Merge Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the temp directory used during merge operations */
-  Path getMergesDir() {
-    return new Path(getRegionDir(), REGION_MERGES_DIR);
-  }
-
-  Path getMergesDir(final HRegionInfo hri) {
-    return new Path(getMergesDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any merge detritus that may have been left around from previous merge attempts.
-   */
-  void cleanupMergesDir() throws IOException {
-    deleteDir(getMergesDir());
-  }
-
-  /**
-   * Remove merged region
-   * @param mergedRegion {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegion);
-    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Create the region merges directory.
-   * @throws IOException If merges dir already exists or we fail to create it.
-   * @see HRegionFileSystem#cleanupMergesDir()
-   */
-  void createMergesDir() throws IOException {
-    Path mergesdir = getMergesDir();
-    if (fs.exists(mergesdir)) {
-      LOG.info("The " + mergesdir
-          + " directory exists; deleting it so it can be recreated");
-      if (!fs.delete(mergesdir, true)) {
-        throw new IOException("Failed deletion of " + mergesdir
-            + " before creating it again.");
-      }
-    }
-    if (!fs.mkdirs(mergesdir)) {
-      throw new IOException("Failed create of " + mergesdir);
-    }
-  }
-
-  /**
-   * Write out a merge reference under the given merges directory. Package local
-   * so it doesn't leak out of regionserver.
-   * @param mergedRegion {@link HRegionInfo} of the merged region
-   * @param familyName Column Family Name
-   * @param f File to create reference.
-   * @param mergedDir
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
-      final StoreFile f, final Path mergedDir)
-      throws IOException {
-    Path referenceDir = new Path(new Path(mergedDir,
-        mergedRegion.getEncodedName()), familyName);
-    // A whole reference to the store file.
-    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
-    // Add the referred-to region's name as a dot-separated suffix.
-    // See the REF_NAME_REGEX regex above. The referred-to region's name is
-    // up in the path of the passed-in <code>f</code> -- the parent dir is the family,
-    // and the directory above that is the region name.
-    String mergingRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    return createReferenceFile(r, f, mergingRegionName, referenceDir);
-  }
-
-  /**
-   * Commit a merged region, moving it from the merges temporary directory to
-   * the proper location in the filesystem.
-   * @param mergedRegionInfo merged region {@link HRegionInfo}
-   * @throws IOException
-   */
-  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegionInfo);
-    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
-    // Move the tmp dir in the expected location
-    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
-      moveNewRegionFromTmpDirToRegionDir(mergedRegionTmpDir, regionDir);
-    }
-  }
-
-  // ===========================================================================
-  //  Create/Open/Delete Helpers
-  // ===========================================================================
-  /**
-   * Log the current state of the region
-   * @param LOG log to output information
-   * @throws IOException if an unexpected exception occurs
-   */
-  void logFileSystemState(final Log LOG) throws IOException {
-    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
-  }
-
-  /**
-   * @param hri
-   * @return Content of the file we write out to the filesystem under a region
-   * @throws IOException
-   */
-  protected static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
-    return hri.toDelimitedByteArray();
-  }
-
-  /**
-   * Create a {@link HRegionInfo} from the serialized version on-disk.
-   * @param fs {@link FileSystem} that contains the Region Info file
-   * @param regionDir {@link Path} to the Region Directory that contains the Info file
-   * @return An {@link HRegionInfo} instance read from the Region Info file.
-   * @throws IOException if an error occurred during file open/read operation.
-   */
-  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
-      throws IOException {
-    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
-    try {
-      return HRegionInfo.parseFrom(in);
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * Write the .regioninfo file on-disk.
-   */
-  protected static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
-      final Path regionInfoFile, final byte[] content) throws IOException {
-    // First check to get the permissions
-    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
-    // Write the RegionInfo file content
-    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
-    try {
-      out.write(content);
-    } finally {
-      out.close();
-    }
-  }
-
-  /**
-   * Write out an info file under the stored region directory. Useful for recovering
-   * mangled regions. If the regioninfo file already exists on disk, we exit early.
-   */
-  void checkRegionInfoOnFilesystem() throws IOException {
-    // Compose the content of the file so we can compare to length in filesystem. If not same,
-    // rewrite it (it may have been written in the old format using Writables instead of pb). The
-    // pb version is much shorter -- we now write without the toString version -- so checking
-    // length alone should be sufficient. We don't want to read the file every time to check
-    // whether it is pb-serialized.
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    try {
-      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-
-      FileStatus status = fs.getFileStatus(regionInfoFile);
-      if (status != null && status.getLen() == content.length) {
-        // Then assume the content is good and move on.
-        // NOTE: matching length alone does not guarantee the content matches.
-        return;
-      }
-
-      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
-      if (!fs.delete(regionInfoFile, false)) {
-        throw new IOException("Unable to remove existing " + regionInfoFile);
-      }
-    } catch (FileNotFoundException e) {
-      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
-          " on table " + regionInfo.getTable());
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    writeRegionInfoOnFilesystem(content, true);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful for recovering mangled regions.
-   * @param useTempDir whether to use the region .tmp dir for a safer file creation.
-   */
-  protected void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    writeRegionInfoOnFilesystem(content, useTempDir);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful for recovering mangled regions.
-   * @param regionInfoContent serialized version of the {@link HRegionInfo}
-   * @param useTempDir whether to use the region .tmp dir for a safer file creation.
-   */
-  protected void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
-      final boolean useTempDir) throws IOException {
-    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-    if (useTempDir) {
-      // Create in tmpDir and then move into place in case we crash after
-      // create but before close. If we don't successfully close the file,
-      // subsequent region reopens will fail below because the create is
-      // registered in the NN.
-
-      // And then create the file
-      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
-
-      // If a datanode crashes, or the RS goes down just before close is called on the
-      // regioninfo file created in the .tmp directory, the next creation attempt will
-      // get an AlreadyBeingCreatedException.
-      // Hence delete the file first if it exists.
-      if (FSUtils.isExists(fs, tmpPath)) {
-        FSUtils.delete(fs, tmpPath, true);
-      }
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
-
-      // Move the created file to the original path
-      if (fs.exists(tmpPath) &&  !rename(tmpPath, regionInfoFile)) {
-        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
-      }
-    } else {
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-    }
-  }
-
-  /**
-   * Create a new Region on file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @throws IOException if the region creation fails due to a FileSystem exception.
-   */
-  public static HRegionFileSystem createRegionOnFileSystem(
-      final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo)
-      throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (fs.exists(regionDir)) {
-      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
-      throw new IOException("The specified region already exists on disk: " + regionDir);
-    }
-
-    // Create the region directory
-    if (!createDirOnFileSystem(fs, conf, regionDir)) {
-      LOG.warn("Unable to create the region directory: " + regionDir);
-      throw new IOException("Unable to create region directory: " + regionDir);
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    regionFs.writeRegionInfoOnFilesystem(false);
-    return regionFs;
-  }
-  
-  /**
-   * Call this only if you don't have the HRegionInfo in memory.
-   * This method will load it from disk.
-   * 
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} that contains the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param encodedRegionName encoded name of the region to open
-   * @param readOnly True if you don't want to edit the region data
-   * @return an open {@link HRegionFileSystem} for the region
-   * @throws IOException
-   */
-  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final String encodedRegionName, boolean readOnly)
-      throws IOException {
-    Path regionDir = FsLayout.getRegionDir(tableDir, encodedRegionName);
-    HRegionInfo hri = loadRegionInfoFileContent(fs, regionDir);
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, hri);
-
-    if (!regionFs.existsOnDisk()) {
-      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
-      throw new IOException("The specified region does not exist on disk: " + regionDir);
-    }
-
-    if (!readOnly) {
-      // Cleanup temporary directories
-      regionFs.cleanupTempDir();
-      regionFs.cleanupSplitsDir();
-      regionFs.cleanupMergesDir();
-
-      // if it doesn't exist, write the HRI to a file in case we need to recover hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
-    }
-
-    return regionFs;
-  }
-
-  /**
-   * Open Region from file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @param readOnly True if you don't want to edit the region data
-   * @throws IOException if the region creation fails due to a FileSystem exception.
-   */
-  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
-      throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!regionFs.existsOnDisk()) {
-      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
-      throw new IOException("The specified region does not exist on disk: " + regionDir);
-    }
-
-    if (!readOnly) {
-      // Cleanup temporary directories
-      regionFs.cleanupTempDir();
-      regionFs.cleanupSplitsDir();
-      regionFs.cleanupMergesDir();
-
-      // if it doesn't exist, write the HRI to a file in case we need to recover hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
-    }
-
-    return regionFs;
-  }
-  
-  /**
-   * Does the region directory for this HRFS instance exist on disk
-   * @return true if the region directory exists
-   * @throws IOException
-   */
-  public boolean existsOnDisk() throws IOException {
-    Path regionDir = getRegionDir();
-    return fs.exists(regionDir);
-  }
-  
-  /**
-   * Delete the region directory if it exists.
-   * @param conf
-   * @param hri
-   * @return True if deleted the region directory.
-   * @throws IOException
-   */
-  public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
-  throws IOException {
-    Path rootDir = FSUtils.getRootDir(conf);
-    FileSystem fs = rootDir.getFileSystem(conf);
-    return FSUtils.deleteDirectory(fs,
-      FsLayout.getRegionDir(FSUtils.getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
-  }
-  
-  /**
-   * Remove the region from the table directory, archiving the region's hfiles.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to remove the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be deleted
-   * @throws IOException if the request cannot be completed
-   */
-  public static void deleteAndArchiveRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!fs.exists(regionDir)) {
-      LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
-      return;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETING region " + regionDir);
-    }
-
-    // Archive region
-    Path rootDir = FSUtils.getRootDir(conf);
-    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
-
-    // Delete empty region dir
-    if (!fs.delete(regionDir, true)) {
-      LOG.warn("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Creates a directory. Assumes the user has already checked for this directory existence.
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
-   *         whether the directory exists or not, and returns true if it exists.
-   * @throws IOException
-   */
-  boolean createDir(Path dir) throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * Renames a directory. Assumes the user has already checked for this directory existence.
-   * @param srcpath
-   * @param dstPath
-   * @return true if rename is successful.
-   * @throws IOException
-   */
-  boolean rename(Path srcpath, Path dstPath) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.rename(srcpath, dstPath);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
-        // dir is not there, retry after some time.
-        try {
-          sleepBeforeRetry("Rename Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in rename", lastIOE);
-  }
-
-  /**
-   * Deletes a directory. Assumes the user has already checked for this directory existence.
-   * @param dir
-   * @return true if the directory is deleted.
-   * @throws IOException
-   */
-  boolean deleteDir(Path dir) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.delete(dir, true);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(dir)) return true;
-        // dir is there, retry deleting after some time.
-        try {
-          sleepBeforeRetry("Delete Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in deleteDir", lastIOE);
-  }
-
-  /**
-   * sleeping logic; handles the interrupt exception.
-   */
-  protected void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
-    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
-  }
-
-  /**
-   * Creates a directory for a filesystem and configuration object. Assumes the user has already
-   * checked for this directory existence.
-   * @param fs
-   * @param conf
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
-   *         whether the directory exists or not, and returns true if it exists.
-   * @throws IOException
-   */
-  protected static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
-      throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * Sleeping logic for static methods; handles the interrupt exception. A static version
-   * is kept to avoid re-reading the configured integer values.
-   */
-  protected static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
-      int hdfsClientRetriesNumber) throws InterruptedException {
-    if (sleepMultiplier > hdfsClientRetriesNumber) {
-      LOG.debug(msg + ", retries exhausted");
-      return;
-    }
-    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
-    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
-  }
-}
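
For reference while reading the deletion above: the createDir/rename/deleteDir helpers all share the same retry-with-linear-backoff shape. A self-contained sketch of that pattern follows; the class, field, and method names are hypothetical and not part of this patch.

import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the retry-with-linear-backoff shape shared by the deleted
// createDir/rename/deleteDir helpers. All names here are hypothetical.
class RetryingFsOps {
  private final FileSystem fs;
  private final int retries;      // cf. "hdfs.client.retries.number"
  private final int baseSleepMs;  // cf. "hdfs.client.sleep.before.retries"

  RetryingFsOps(FileSystem fs, int retries, int baseSleepMs) {
    this.fs = fs;
    this.retries = retries;
    this.baseSleepMs = baseSleepMs;
  }

  boolean mkdirsWithRetries(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        // Re-check state: a concurrent creator counts as success.
        if (fs.exists(dir)) return true;
        sleepBeforeRetry(i + 1);
      }
    } while (++i <= retries);
    throw new IOException("mkdirs failed after " + retries + " retries", lastIOE);
  }

  private void sleepBeforeRetry(int sleepMultiplier) throws InterruptedIOException {
    try {
      Thread.sleep((long) baseSleepMs * sleepMultiplier);  // linear backoff
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
  }
}

The key property is that each retry re-checks filesystem state, so an operation that succeeded concurrently (or partially) is treated as success rather than looping.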

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
deleted file mode 100644
index d278624..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-public class HRegionFileSystemFactory {
-  public HRegionFileSystem create(final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    return new HRegionFileSystem(conf, fs, tableDir, regionInfo);
-  }
-}
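
The deleted factory's only job was to let callers construct the right HRegionFileSystem subclass without hard-coding it. A sketch of the kind of selection it enabled; the boolean flag stands in for the branch's real layout detection (done via the FsLayout classes, not shown in this diff), and the demo class assumes the same org.apache.hadoop.hbase.regionserver package.

// Sketch only: layout-dependent choice between the two factories deleted in
// this commit. The boolean is a stand-in for the real FsLayout detection.
class FactoryChooserDemo {
  HRegionFileSystemFactory choose(boolean hierarchicalLayout) {
    return hierarchicalLayout
        ? new HierarchicalHRegionFileSystemFactory()
        : new HRegionFileSystemFactory();
  }
}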

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 85eac25..e15f61a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -208,7 +209,7 @@ public class HStore implements Store {
     this.fs = region.getRegionFileSystem();
 
     // Assemble the store's home directory and Ensure it exists.
-    fs.createStoreDir(family.getNameAsString());
+    fs.openFamily(family.getNameAsString());
     this.region = region;
     this.family = family;
     // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
@@ -1197,8 +1198,7 @@ public class HStore implements Store {
       // Ready to go. Have list of files to compact.
       LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
           + this + " of " + this.getRegionInfo().getRegionNameAsString()
-          + " into tmpdir=" + fs.getTempDir() + ", totalSize="
-          + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
+          + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
 
       // Commence the compaction.
       List<Path> newFiles = compaction.compact(throughputController);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
deleted file mode 100644
index 5378a69..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.fs.layout.StandardHBaseFsLayout;
-
-import com.google.common.annotations.VisibleForTesting;
-
-public class HierarchicalHRegionFileSystem extends HRegionFileSystem {
-  protected HierarchicalHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir,
-      HRegionInfo regionInfo) {
-    super(conf, fs, tableDir, regionInfo);
-  }
-
-  @Override
-  void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws IOException {
-    fs.mkdirs(dest.getParent());
-    super.moveNewRegionFromTmpDirToRegionDir(source, dest);
-  }
-
-  // Probably will never use this function for real, just in tests to compare
-  // humongous vs regular region dir functionality
-  @VisibleForTesting
-  public Path getStandardHBaseRegionDir() {
-    return StandardHBaseFsLayout.get().getRegionDir(tableDir, regionInfoForFs.getEncodedName());
-  }
-}
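
The deleted subclass overrides only moveNewRegionFromTmpDirToRegionDir, creating the intermediate directory before the rename, because a humongous table's region dir is nested one level deeper under a bucket. An illustrative sketch of that path shape; the two-character-prefix bucket naming below is an assumption for illustration, not necessarily the branch's actual scheme.

import org.apache.hadoop.fs.Path;

// Illustration only: a humongous table nests region dirs one level deeper,
// /table/bucket/region instead of /table/region, which is why the deleted
// subclass mkdirs the parent before renaming. Bucket naming here is assumed.
class HumongousPaths {
  static Path regionDir(Path tableDir, String encodedRegionName) {
    String bucket = encodedRegionName.substring(0, 2);  // assumed 2-char prefix
    return new Path(new Path(tableDir, bucket), encodedRegionName);
  }
}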

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
deleted file mode 100644
index fbca254..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-public class HierarchicalHRegionFileSystemFactory extends HRegionFileSystemFactory {
-  @Override
-  public HRegionFileSystem create(Configuration conf, FileSystem fs, Path tableDir,
-      HRegionInfo regionInfo) {
-    return new HierarchicalHRegionFileSystem(conf, fs, tableDir, regionInfo);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
new file mode 100644
index 0000000..58f1927
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionException;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegionDoesNotExistException extends RegionException {
+  /**
+   * @param msg full description of the failure
+   */
+  public RegionDoesNotExistException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * @param hri expected region to find
+   */
+  public RegionDoesNotExistException(HRegionInfo hri) {
+    super("Region '" + hri + "' doesn't exist on the filesystem");
+  }
+}
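
A sketch of how the new exception might be used at an open call site; the guard method below is hypothetical, since the patch itself does not show a caller.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.RegionDoesNotExistException;

class OpenGuardDemo {
  // Hypothetical guard: lets open paths surface "region dir missing" as a
  // typed exception instead of a bare IOException.
  static void ensureRegionExists(FileSystem fs, Path regionDir, HRegionInfo hri)
      throws IOException {
    if (!fs.exists(regionDir)) {
      throw new RegionDoesNotExistException(hri);
    }
  }
}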

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
index 2e6b821..fcc0eec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -197,7 +198,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
           region_a.getRegionInfo().getRegionName());
       if (regionAHasMergeQualifier ||
           hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) {
-        LOG.debug("Region " + (regionAHasMergeQualifier ? 
+        LOG.debug("Region " + (regionAHasMergeQualifier ?
             region_a.getRegionInfo().getRegionNameAsString()
                 : region_b.getRegionInfo().getRegionNameAsString())
             + " is not mergeable because it has merge qualifier in META");

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index a7f8495..6ecb4e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
@@ -372,7 +373,7 @@ public class SplitTransactionImpl implements SplitTransaction {
 
     return new PairOfSameType<Region>(a, b);
   }
-  
+
   @VisibleForTesting
   void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter)
       throws IOException {
@@ -386,7 +387,7 @@ public class SplitTransactionImpl implements SplitTransaction {
     this.parent.getRegionFileSystem().assertReferenceFileCountOfDaughterDir(
       expectedReferenceFileCount, daughter);
   }
-  
+
   /**
    * Perform time consuming opening of the daughter regions.
    * @param server Hosting server instance.  Can be null when testing

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index c5ef7fd..69bf1ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -259,19 +260,45 @@ public class StoreFileInfo {
     return reader;
   }
 
+  private interface ComputeFileInfo<T> {
+    T compute(FileSystem fs, FileStatus status, long offset, long length) throws IOException;
+  }
+
   /**
    * Compute the HDFS Block Distribution for this StoreFile
    */
   public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs)
       throws IOException {
+    return computeFileInfo(fs, new ComputeFileInfo<HDFSBlocksDistribution>() {
+      @Override
+      public HDFSBlocksDistribution compute(FileSystem fs, FileStatus status, long offset,
+          long length) throws IOException {
+        return FSUtils.computeHDFSBlocksDistribution(fs, status, offset, length);
+      }
+    });
+  }
 
+  public BlockLocation[] getFileBlockLocations(final FileSystem fs)
+      throws IOException {
+    return computeFileInfo(fs, new ComputeFileInfo<BlockLocation[]>() {
+      @Override
+      public BlockLocation[] compute(FileSystem fs, FileStatus status, long offset,
+          long length) throws IOException {
+        return fs.getFileBlockLocations(status, offset, length);
+      }
+    });
+  }
+
+  /**
+   * Generic helper used by the methods above: applies the given computation to the
+   * store file's data, handling links and reference files.
+   */
+  private <T> T computeFileInfo(final FileSystem fs,
+      final ComputeFileInfo<T> computeObj) throws IOException {
     // guard against the case where we get the FileStatus from link, but by the time we
     // call compute the file is moved again
     if (this.link != null) {
       FileNotFoundException exToThrow = null;
       for (int i = 0; i < this.link.getLocations().length; i++) {
         try {
-          return computeHDFSBlocksDistributionInternal(fs);
+          return computeFileInfoInternal(fs, computeObj);
         } catch (FileNotFoundException ex) {
           // try the other location
           exToThrow = ex;
@@ -279,18 +306,49 @@ public class StoreFileInfo {
       }
       throw exToThrow;
     } else {
-      return computeHDFSBlocksDistributionInternal(fs);
+      return computeFileInfoInternal(fs, computeObj);
     }
   }
 
-  private HDFSBlocksDistribution computeHDFSBlocksDistributionInternal(final FileSystem fs)
+  private <T> T computeFileInfoInternal(final FileSystem fs, final ComputeFileInfo<T> computeObj)
       throws IOException {
     FileStatus status = getReferencedFileStatus(fs);
     if (this.reference != null) {
-      return computeRefFileHDFSBlockDistribution(fs, reference, status);
+      return computeRefFileInfo(fs, reference, status, computeObj);
+    } else {
+      return computeObj.compute(fs, status, 0, status.getLen());
+    }
+  }
+
+  /**
+   * Helper to compute file info for a given reference file. For reference files we
+   * don't compute an exact value; an estimate is good enough. We assume the bottom
+   * part takes the first half of the referenced file and the top part takes the
+   * second half. This is just an estimate, since the midkey of the region is not the
+   * midkey of the HFile, and the number and size of keys vary.
+   * If this estimate isn't good enough, we can improve it later.
+   * @param fs  The FileSystem
+   * @param reference  The reference
+   * @param status  The referenced file's FileStatus
+   * @param computeObj  computation to apply to the estimated half of the file
+   * @return the result of the computation, or null if status is null
+   */
+  private static <T> T computeRefFileInfo(final FileSystem fs, final Reference reference,
+      final FileStatus status, final ComputeFileInfo<T> computeObj) throws IOException {
+    if (status == null) {
+      return null;
+    }
+
+    long start = 0;
+    long length = 0;
+    if (Reference.isTopFileRegion(reference.getFileRegion())) {
+      start = status.getLen()/2;
+      length = status.getLen() - status.getLen()/2;
     } else {
-      return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+      start = 0;
+      length = status.getLen()/2;
     }
+    return computeObj.compute(fs, status, start, length);
   }
 
   /**
@@ -388,13 +446,13 @@ public class StoreFileInfo {
     Matcher m = REF_NAME_PATTERN.matcher(name);
     return m.matches() && m.groupCount() > 1;
   }
-  
+
   /*
    * Return path to the file referred to by a Reference.  Presumes a directory
    * hierarchy of <code>${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname</code>.
-   * Unless the table is a humongous table in which case the hierarchy is 
+   * Unless the table is a humongous table in which case the hierarchy is
    * <code>${hbase.rootdir}/data/${namespace}/tablename/bucket/regionname/familyname</code>.
-   * 
+   *
    * @param p Path to a Reference file.
    * @return Calculated path to parent region file.
    * @throws IllegalArgumentException when path regex fails to match.
@@ -406,7 +464,7 @@ public class StoreFileInfo {
       throw new IllegalArgumentException("Failed match of store file name " +
           p.toString());
     }
-  
+
     // Other region name is suffix on the passed Reference file name
     String otherRegion = m.group(2);
     // Tabledir is up two directories from where Reference was written.
@@ -417,7 +475,7 @@ public class StoreFileInfo {
       LOG.debug("reference '" + p + "' to region=" + otherRegion
         + " hfile=" + nameStrippedOfSuffix);
     }
-  
+
     return new Path(new Path(FsLayout.getRegionDir(tableDir, otherRegion), p.getParent()
           .getName()), nameStrippedOfSuffix);
   }
@@ -456,39 +514,6 @@ public class StoreFileInfo {
     return validateStoreFileName(p.getName());
   }
 
-  /**
-   * helper function to compute HDFS blocks distribution of a given reference
-   * file.For reference file, we don't compute the exact value. We use some
-   * estimate instead given it might be good enough. we assume bottom part
-   * takes the first half of reference file, top part takes the second half
-   * of the reference file. This is just estimate, given
-   * midkey ofregion != midkey of HFile, also the number and size of keys vary.
-   * If this estimate isn't good enough, we can improve it later.
-   * @param fs  The FileSystem
-   * @param reference  The reference
-   * @param status  The reference FileStatus
-   * @return HDFS blocks distribution
-   */
-  private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
-      final FileSystem fs, final Reference reference, final FileStatus status)
-      throws IOException {
-    if (status == null) {
-      return null;
-    }
-
-    long start = 0;
-    long length = 0;
-
-    if (Reference.isTopFileRegion(reference.getFileRegion())) {
-      start = status.getLen()/2;
-      length = status.getLen() - status.getLen()/2;
-    } else {
-      start = 0;
-      length = status.getLen()/2;
-    }
-    return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
-  }
-
   @Override
   public boolean equals(Object that) {
     if (this == that) return true;
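
The StoreFileInfo change above collapses two near-duplicate methods (HDFS block distribution and raw block locations) into one generic walk parameterized by a callback, keeping the same halving estimate for reference files. The shape of that refactor in isolation; all names below are illustrative, not the patch's.

import java.io.IOException;

class CallbackRefactorDemo {
  // One traversal, many behaviors: the callback receives the byte range to
  // inspect, mirroring ComputeFileInfo.compute(fs, status, offset, length).
  interface Computation<T> {
    T compute(long offset, long length) throws IOException;
  }

  // Same halving estimate as computeRefFileInfo: a top reference covers the
  // second half of the referenced file, a bottom reference the first half.
  static <T> T computeOverHalf(long fileLen, boolean topHalf, Computation<T> fn)
      throws IOException {
    long start = topHalf ? fileLen / 2 : 0;
    long length = topHalf ? fileLen - fileLen / 2 : fileLen / 2;
    return fn.compute(start, length);
  }

  public static void main(String[] args) throws IOException {
    String span = computeOverHalf(100, true, new Computation<String>() {
      @Override
      public String compute(long offset, long length) {
        return "bytes " + offset + ".." + (offset + length);
      }
    });
    System.out.println(span);  // prints: bytes 50..100
  }
}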

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index e4b29ee..1e40ca1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -592,7 +592,7 @@ public class RestoreSnapshotHelper {
     Path referenceFile = new Path(new Path(FsLayout.getRegionDir(new Path(
       snapshotTable.getNameAsString()), regionInfo), familyDir.getName()), hfileName);
     Path referredToFile = StoreFileInfo.getReferredToFile(referenceFile);
-    
+
     String snapshotRegionName = referredToFile.getParent().getParent().getName();
     String fileName = referredToFile.getName();
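
The path arithmetic above relies on the reference-file naming convention also visible in the deleted createReferenceFile: a reference is named with the referenced hfile's name plus the referring region's encoded name as a dot-separated suffix. A minimal sketch of that convention; the helper names are made up, and the real parsing goes through REF_NAME_PATTERN and getReferredToFile.

import org.apache.hadoop.fs.Path;

class ReferenceNameDemo {
  // A reference file is named "<referencedHFileName>.<referringRegionEncodedName>".
  static String referenceName(String hfileName, String regionEncodedName) {
    return hfileName + "." + regionEncodedName;
  }

  // Inverse: split on the last dot to recover both parts.
  static String[] parse(Path referenceFile) {
    String name = referenceFile.getName();
    int dot = name.lastIndexOf('.');
    return new String[] { name.substring(0, dot), name.substring(dot + 1) };
  }

  public static void main(String[] args) {
    Path ref = new Path("/table/daughter/fam/abc123.parentRegion");
    String[] parts = parse(ref);
    System.out.println(parts[0] + " in region " + parts[1]);  // abc123 in region parentRegion
  }
}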
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 85f08af..406f203 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -40,12 +40,12 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -200,8 +200,7 @@ public class SnapshotManifest {
     RegionVisitor visitor = createRegionVisitor(desc);
 
     // Open the RegionFS
-    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-          tableDir, regionInfo, true);
+    HRegionFileSystem regionFs = HRegionFileSystem.open(conf, regionInfo, true);
     monitor.rethrowException();
 
     // 1. dump region meta info into the snapshot directory
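
The snapshot call sites above show the direction of the experiment: callers stop threading FileSystem and tableDir through and let the region filesystem resolve them from the Configuration. A before/after sketch; the open(conf, hri, readOnly) signature is the one this incomplete patch introduces and may well change, and the throws clause below is an assumption.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.fs.HRegionFileSystem;

class OpenStyleDemo {
  // Old style (regionserver.HRegionFileSystem): the caller resolved fs and
  // tableDir itself and passed them in:
  //   HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
  //
  // New style in this patch: resolution moves behind the API. The IOException
  // is assumed, matching the IO paths that call it in this diff.
  static HRegionFileSystem openReadOnly(Configuration conf, HRegionInfo hri)
      throws IOException {
    return HRegionFileSystem.open(conf, hri, true);
  }
}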

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index ad7c93a..6337174 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,8 +76,7 @@ public class SnapshotManifestV1 {
     }
 
     public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
-      HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        fs, snapshotDir, regionInfo);
+      HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createSnapshot(conf, regionInfo);
       return snapshotRegionFs;
     }
 
@@ -157,8 +156,7 @@ public class SnapshotManifestV1 {
 
   static SnapshotRegionManifest buildManifestFromDisk (final Configuration conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-          tableDir, regionInfo, true);
+    HRegionFileSystem regionFs = HRegionFileSystem.open(conf, regionInfo, true);
     SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
 
     // 1. dump region meta info into the snapshot directory