You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by mb...@apache.org on 2015/10/29 22:30:19 UTC

[2/2] hbase git commit: HBASE-14714 some cleanup to snapshot code

HBASE-14714 some cleanup to snapshot code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/569fe82a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/569fe82a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/569fe82a

Branch: refs/heads/branch-1
Commit: 569fe82acf019acfaa3903402255160092468cbc
Parents: 066f3f7
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Oct 29 13:48:59 2015 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Thu Oct 29 14:04:48 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HTableDescriptor.java   | 15 ++++-
 .../hbase/client/TableSnapshotScanner.java      | 43 ++++---------
 .../hadoop/hbase/master/MasterFileSystem.java   |  4 ++
 .../hbase/master/snapshot/SnapshotManager.java  |  5 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   | 65 ++++++++------------
 .../hbase/snapshot/SnapshotReferenceUtil.java   |  1 -
 .../client/TestRestoreSnapshotFromClient.java   |  1 -
 .../TableSnapshotInputFormatTestBase.java       |  1 -
 .../master/cleaner/TestSnapshotFromMaster.java  | 15 +----
 .../master/snapshot/TestSnapshotManager.java    |  1 -
 .../hbase/snapshot/SnapshotTestingUtils.java    | 32 +++++-----
 .../snapshot/TestFlushSnapshotFromClient.java   | 44 ++++---------
 .../TestRestoreFlushSnapshotFromClient.java     |  5 +-
 .../snapshot/TestRestoreSnapshotHelper.java     | 11 ++--
 14 files changed, 92 insertions(+), 151 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 661edd9..a6c08c3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -371,8 +371,21 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
    * @param desc The descriptor.
    */
   public HTableDescriptor(final HTableDescriptor desc) {
+    this(desc.name, desc);
+  }
+
+  /**
+   * Construct a table descriptor by cloning the descriptor passed as a parameter
+   * but using a different table name.
+   * <p>
+   * Makes a deep copy of the supplied descriptor.
+   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
+   * @param name Table name.
+   * @param desc The descriptor.
+   */
+  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
     super();
-    setName(desc.name);
+    setName(name);
     setMetaFlags(this.name);
     for (HColumnDescriptor c: desc.families.values()) {
       this.families.put(c.getName(), new HColumnDescriptor(c));

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index c8e34e5..4601ae4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -34,11 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -49,10 +45,10 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, 
- * and this scanner can be used to run the scan directly over the snapshot files. 
- * The snapshot should not be deleted while there are open scanners reading from snapshot 
- * files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
+ * to a pure-hdfs cluster, and this scanner can be used to
+ * run the scan directly over the snapshot files. The snapshot should not be deleted while there
+ * are open scanners reading from snapshot files.
  *
  * <p>
  * An internal RegionScanner is used to execute the {@link Scan} obtained
@@ -125,23 +121,14 @@ public class TableSnapshotScanner extends AbstractClientScanner {
   }
 
   private void init() throws IOException {
-    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
-    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
-
-    // load table descriptor
-    htd = manifest.getTableDescriptor();
-
-    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null) {
-      throw new IllegalArgumentException("Snapshot seems empty");
-    }
-
-    regions = new ArrayList<HRegionInfo>(regionManifests.size());
-    for (SnapshotRegionManifest regionManifest : regionManifests) {
-      // load region descriptor
-      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
-
+    final RestoreSnapshotHelper.RestoreMetaChanges meta =
+      RestoreSnapshotHelper.copySnapshotForScanner(
+        conf, fs, rootDir, restoreDir, snapshotName);
+    final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
+
+    htd = meta.getTableDescriptor();
+    regions = new ArrayList<HRegionInfo>(restoredRegions.size());
+    for (HRegionInfo hri: restoredRegions) {
       if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
           hri.getStartKey(), hri.getEndKey())) {
         regions.add(hri);
@@ -150,11 +137,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
 
     // sort for regions according to startKey.
     Collections.sort(regions);
-
     initScanMetrics(scan);
-
-    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
-      rootDir, restoreDir, snapshotName);
   }
 
   @Override
@@ -184,7 +167,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
         if (result == null) {
           currentRegionScanner.close();
           currentRegionScanner = null;
-        }        
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 02d18d6..5d42399 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -596,4 +596,8 @@ public class MasterFileSystem {
   public RecoveryMode getLogRecoveryMode() {
     return this.splitLogManager.getRecoveryMode();
   }
+
+  public void logFileSystemState(Log log) throws IOException {
+    FSUtils.logFileSystemState(fs, rootdir, log);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index cfddaca..c1d0ad4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
@@ -741,7 +740,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
         cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc);
       }
     } else {
-      HTableDescriptor htd = RestoreSnapshotHelper.cloneTableSchema(snapshotTableDesc, tableName);
+      HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
       if (cpHost != null) {
         cpHost.preCloneSnapshot(reqSnapshot, htd);
       }
@@ -761,7 +760,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
       }
     }
   }
-  
+
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, TableName tableName)
       throws IOException {
     if (this.master.getMasterQuotaManager().isQuotaEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index a09942e..0c2e2ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -40,7 +41,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -183,7 +182,7 @@ public class RestoreSnapshotHelper {
       return null;
     }
 
-    RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap);
+    RestoreMetaChanges metaChanges = new RestoreMetaChanges(tableDesc, parentsMap);
 
     // Take a copy of the manifest.keySet() since we are going to modify
     // this instance, by removing the regions already present in the restore dir.
@@ -245,13 +244,19 @@ public class RestoreSnapshotHelper {
    */
   public static class RestoreMetaChanges {
     private final Map<String, Pair<String, String> > parentsMap;
+    private final HTableDescriptor htd;
 
     private List<HRegionInfo> regionsToRestore = null;
     private List<HRegionInfo> regionsToRemove = null;
     private List<HRegionInfo> regionsToAdd = null;
 
-    RestoreMetaChanges(final Map<String, Pair<String, String> > parentsMap) {
+    RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
       this.parentsMap = parentsMap;
+      this.htd = htd;
+    }
+
+    public HTableDescriptor getTableDescriptor() {
+      return htd;
     }
 
     /**
@@ -477,13 +482,12 @@ public class RestoreSnapshotHelper {
    * @return The set of files in the specified family directory.
    */
   private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
-    Set<String> familyFiles = new HashSet<String>();
-
     FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
-    if (hfiles == null) return familyFiles;
+    if (hfiles == null) return Collections.emptySet();
 
-    for (FileStatus hfileRef: hfiles) {
-      String hfileName = hfileRef.getPath().getName();
+    Set<String> familyFiles = new HashSet<String>(hfiles.length);
+    for (int i = 0; i < hfiles.length; ++i) {
+      String hfileName = hfiles[i].getPath().getName();
       familyFiles.add(hfileName);
     }
 
@@ -667,7 +671,11 @@ public class RestoreSnapshotHelper {
    * @return the new HRegion instance
    */
   public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
-    HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(),
+    return cloneRegionInfo(tableDesc.getTableName(), snapshotRegionInfo);
+  }
+
+  public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
+    HRegionInfo regionInfo = new HRegionInfo(tableName,
                       snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
                       snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
     regionInfo.setOffline(snapshotRegionInfo.isOffline());
@@ -682,9 +690,9 @@ public class RestoreSnapshotHelper {
     FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regionDirs == null) return null;
 
-    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
-    for (FileStatus regionDir: regionDirs) {
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
+    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
+    for (int i = 0; i < regionDirs.length; ++i) {
+      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
       regions.add(hri);
     }
     LOG.debug("found " + regions.size() + " regions for table=" +
@@ -693,30 +701,6 @@ public class RestoreSnapshotHelper {
   }
 
   /**
-   * Create a new table descriptor cloning the snapshot table schema.
-   *
-   * @param snapshotTableDescriptor
-   * @param tableName
-   * @return cloned table descriptor
-   * @throws IOException
-   */
-  public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
-      final TableName tableName) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
-      htd.addFamily(hcd);
-    }
-    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-        snapshotTableDescriptor.getValues().entrySet()) {
-      htd.setValue(e.getKey(), e.getValue());
-    }
-    for (Map.Entry<String, String> e: snapshotTableDescriptor.getConfiguration().entrySet()) {
-      htd.setConfiguration(e.getKey(), e.getValue());
-    }
-    return htd;
-  }
-
-  /**
    * Copy the snapshot files for a snapshot scanner, discards meta changes.
    * @param conf
    * @param fs
@@ -725,8 +709,8 @@ public class RestoreSnapshotHelper {
    * @param snapshotName
    * @throws IOException
    */
-  public static void copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir,
-      Path restoreDir, String snapshotName) throws IOException {
+  public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
+      Path rootDir, Path restoreDir, String snapshotName) throws IOException {
     // ensure that restore dir is not under root dir
     if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
       throw new IllegalArgumentException("Filesystems for restore directory and HBase root directory " +
@@ -749,11 +733,12 @@ public class RestoreSnapshotHelper {
     // in the base hbase root dir.
     RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
       manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false);
-    helper.restoreHdfsRegions(); // TODO: parallelize.
+    RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Restored table dir:" + restoreDir);
       FSUtils.logFileSystemState(fs, restoreDir, LOG);
     }
+    return metaChanges;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
index c62d53d..b997432 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 
 /**
  * Utility methods for interacting with the snapshot referenced files.

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 4837f81..b0f07bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -25,7 +25,6 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 3b42f9a..3808806 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index b3c3726..08bafdc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -22,9 +22,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -39,7 +37,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
@@ -381,17 +378,7 @@ public class TestSnapshotFromMaster {
   private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir,
       FileSystem fs, TableName tableName) throws IOException {
     Path tableArchive = FSUtils.getTableDir(archiveDir, tableName);
-    Path[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
-    List<String> files = new ArrayList<String>(archivedHFiles.length);
-    LOG.debug("Have archived hfiles: " + tableArchive);
-    for (Path file : archivedHFiles) {
-      LOG.debug(file);
-      files.add(file.getName());
-    }
-    // sort the archived files
-
-    Collections.sort(files);
-    return files;
+    return SnapshotTestingUtils.listHFileNames(fs, tableArchive);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
index 8066ec3..066d6f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.MasterFileSystem;

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index a710d29..b79f7b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -43,11 +43,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
@@ -150,6 +148,14 @@ public class SnapshotTestingUtils {
         tableName);
   }
 
+  public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
+      SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
+      throws IOException {
+    MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
+    confirmSnapshotValid(snapshotDescriptor, tableName, family,
+        mfs.getRootDir(), testUtil.getHBaseAdmin(), mfs.getFileSystem());
+  }
+
   /**
    * Confirm that the snapshot contains references to all the files that should
    * be in the snapshot.
@@ -330,28 +336,18 @@ public class SnapshotTestingUtils {
    * @return array of the current HFiles in the table (could be a zero-length array)
    * @throws IOException on unexecpted error reading the FS
    */
-  public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
+  public static ArrayList<String> listHFileNames(final FileSystem fs, final Path tableDir)
       throws IOException {
-    final ArrayList<Path> hfiles = new ArrayList<Path>();
+    final ArrayList<String> hfiles = new ArrayList<String>();
     FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
       @Override
       public void storeFile(final String region, final String family, final String hfileName)
           throws IOException {
-        hfiles.add(new Path(tableDir, new Path(region, new Path(family, hfileName))));
+        hfiles.add(hfileName);
       }
     });
-    return hfiles.toArray(new Path[hfiles.size()]);
-  }
-
-  public static String[] listHFileNames(final FileSystem fs, final Path tableDir)
-      throws IOException {
-    Path[] files = listHFiles(fs, tableDir);
-    String[] names = new String[files.length];
-    for (int i = 0; i < files.length; ++i) {
-      names[i] = files[i].getName();
-    }
-    Arrays.sort(names);
-    return names;
+    Collections.sort(hfiles);
+    return hfiles;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index d8c6ab5..667c015 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -140,8 +139,7 @@ public class TestFlushSnapshotFromClient {
     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "offlineTableSnapshot";
@@ -154,14 +152,10 @@ public class TestFlushSnapshotFromClient {
       snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-        admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
   }
 
    /**
@@ -180,8 +174,7 @@ public class TestFlushSnapshotFromClient {
     }
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "skipFlushTableSnapshot";
@@ -194,14 +187,10 @@ public class TestFlushSnapshotFromClient {
         snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-        admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
 
     admin.deleteSnapshot(snapshot);
     snapshots = admin.listSnapshots();
@@ -223,8 +212,7 @@ public class TestFlushSnapshotFromClient {
     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "offlineTableSnapshot";
@@ -242,14 +230,10 @@ public class TestFlushSnapshotFromClient {
       snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-        admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
   }
 
   @Test (timeout=300000)
@@ -296,8 +280,8 @@ public class TestFlushSnapshotFromClient {
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
     SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
     LOG.info(" === Async Snapshot Completed ===");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
+
     // make sure we get the snapshot
     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
   }
@@ -497,7 +481,7 @@ public class TestFlushSnapshotFromClient {
     }
 
     // dump for debugging
-    logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     List<SnapshotDescription> taken = admin.listSnapshots();
     int takenSize = taken.size();
@@ -521,10 +505,6 @@ public class TestFlushSnapshotFromClient {
     UTIL.deleteTable(TABLE2_NAME);
   }
 
-  private void logFSTree(Path root) throws IOException {
-    FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
-  }
-
   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
       throws IOException, InterruptedException {
     Admin admin = UTIL.getHBaseAdmin();

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
index ba419ad..37f3c4c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
@@ -27,12 +27,10 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -199,7 +197,6 @@ public class TestRestoreFlushSnapshotFromClient {
   //  Helpers
   // ==========================================================================
   private void logFSTree() throws IOException {
-    MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
+    UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/569fe82a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 4ec81f0..cd5a5fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -116,12 +117,12 @@ public class TestRestoreSnapshotHelper {
 
   private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
       final HTableDescriptor htdClone) throws IOException {
-    String[] files = SnapshotTestingUtils.listHFileNames(fs,
+    List<String> files = SnapshotTestingUtils.listHFileNames(fs,
       FSUtils.getTableDir(rootDir, htdClone.getTableName()));
-    assertEquals(12, files.length);
-    for (int i = 0; i < files.length; i += 2) {
-      String linkFile = files[i];
-      String refFile = files[i+1];
+    assertEquals(12, files.size());
+    for (int i = 0; i < files.size(); i += 2) {
+      String linkFile = files.get(i);
+      String refFile = files.get(i+1);
       assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
       assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
       assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));