Posted to commits@hbase.apache.org by ap...@apache.org on 2018/10/15 21:00:08 UTC

[1/3] hbase git commit: HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 59cb65aa3 -> 137423d70


http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
index 6583b2c..ea44eb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hbase.snapshot;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
+import java.net.URI;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -91,14 +94,118 @@ public class TestSnapshotDescriptionUtils {
     Path snapshotDir = new Path(root, HConstants.SNAPSHOT_DIR_NAME);
     Path tmpDir = new Path(snapshotDir, ".tmp");
     Path workingDir = new Path(tmpDir, "not_a_snapshot");
+    Configuration conf = new Configuration();
     assertFalse("Already have working snapshot dir: " + workingDir
         + " but shouldn't. Test file leak?", fs.exists(workingDir));
-    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
     try {
-      SnapshotDescriptionUtils.completeSnapshot(snapshot, root, workingDir, fs);
+      SnapshotDescriptionUtils.completeSnapshot(snapshotDir, workingDir, fs,
+          workingDir.getFileSystem(conf), conf);
       fail("Shouldn't successfully complete move of a non-existent directory.");
     } catch (IOException e) {
-      LOG.info("Correctly failed to move non-existant directory: " + e.getMessage());
+      LOG.info("Correctly failed to move non-existent directory: ", e);
     }
   }
+
+  @Test
+  public void testIsSubDirectoryWorks() {
+    Path rootDir = new Path("hdfs://root/.hbase-snapshot/");
+
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(rootDir, rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://root/.hbase-snapshotdir"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://root/.hbase-snapshot"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://.hbase-snapshot"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://.hbase-snapshot/.tmp"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://root"), rootDir));
+    assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://root/.hbase-snapshot/.tmp"), rootDir));
+    assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("hdfs://root/.hbase-snapshot/.tmp/snapshot"), rootDir));
+
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("s3://root/.hbase-snapshot/"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("s3://root"), rootDir));
+    assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
+        new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), rootDir));
+  }
+
+  @Test
+  public void testIsWithinWorkingDir() throws IOException {
+    Configuration conf = new Configuration();
+    FSUtils.setRootDir(conf, root);
+
+    Path rootDir = root.makeQualified(fs);
+
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        rootDir, conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path(rootDir,".hbase-snapshotdir"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path(rootDir,".hbase-snapshot"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path("hdfs://.hbase-snapshot"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path("hdfs://.hbase-snapshot/.tmp"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("hdfs://root"), conf));
+    assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path(rootDir,".hbase-snapshot/.tmp"), conf));
+    assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path(rootDir, ".hbase-snapshot/.tmp/snapshot"), conf));
+
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path("s3://root/.hbase-snapshot/"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("s3://root"), conf));
+    assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
+        new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), conf));
+  }
+
+  @Test
+  public void testShouldSkipRenameSnapshotDirectories() {
+    URI workingDirURI = URI.create("/User/test1");
+    URI rootDirURI = URI.create("hdfs:///User/test2");
+
+    // should skip rename when the schemes are not the same
+    assertTrue(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    workingDirURI = URI.create("/User/test1");
+    rootDirURI = URI.create("file:///User/test2");
+    assertFalse(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    // skip rename when either the scheme or the authority is not the same
+    workingDirURI = URI.create("hdfs://localhost:8020/User/test1");
+    rootDirURI = URI.create("hdfs://otherhost:8020/User/test2");
+    assertTrue(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    workingDirURI = URI.create("file:///User/test1");
+    rootDirURI = URI.create("hdfs://localhost:8020/User/test2");
+    assertTrue(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    workingDirURI = URI.create("hdfs:///User/test1");
+    rootDirURI = URI.create("hdfs:///User/test2");
+    assertFalse(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    workingDirURI = URI.create("hdfs://localhost:8020/User/test1");
+    rootDirURI = URI.create("hdfs://localhost:8020/User/test2");
+    assertFalse(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    workingDirURI = URI.create("hdfs://user:password@localhost:8020/User/test1");
+    rootDirURI = URI.create("hdfs://user:password@localhost:8020/User/test2");
+    assertFalse(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+
+    // skip rename when user information is not the same
+    workingDirURI = URI.create("hdfs://user:password@localhost:8020/User/test1");
+    rootDirURI = URI.create("hdfs://user2:password2@localhost:8020/User/test2");
+    assertTrue(
+        SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI));
+  }
 }


[2/3] hbase git commit: HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

Posted by ap...@apache.org.
HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

Test fixes by: Taklon Wu <wu...@amazon.com>

Signed-off-by: Zach York <zy...@apache.org>
Signed-off-by: Mingliang Liu <li...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8da4697b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8da4697b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8da4697b

Branch: refs/heads/branch-1.4
Commit: 8da4697b66487d0fde0cf80c16846ab341e823eb
Parents: 59cb65a
Author: Tyler Mi <mt...@amazon.com>
Authored: Tue Sep 4 16:44:59 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon Oct 15 13:58:28 2018 -0700

----------------------------------------------------------------------
 .../src/main/resources/hbase-default.xml        |   9 +
 .../snapshot/DisabledTableSnapshotHandler.java  |   5 +-
 .../snapshot/EnabledTableSnapshotHandler.java   |   2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |  21 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  37 +-
 .../master/snapshot/TakeSnapshotHandler.java    |  58 ++-
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |   8 +-
 .../snapshot/SnapshotDescriptionUtils.java      | 138 +++++-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  57 ++-
 .../hbase/snapshot/SnapshotManifestV1.java      |  15 +-
 .../hbase/snapshot/SnapshotManifestV2.java      |  15 +-
 .../TestSnapshotDFSTemporaryDirectory.java      |  73 +++
 .../client/TestSnapshotTemporaryDirectory.java  | 468 +++++++++++++++++++
 ...hotTemporaryDirectoryWithRegionReplicas.java |  31 ++
 .../snapshot/TestSnapshotHFileCleaner.java      |   6 +-
 .../hbase/snapshot/SnapshotTestingUtils.java    |  15 +-
 ...estExportSnapshotWithTemporaryDirectory.java |  54 +++
 .../hbase/snapshot/TestRegionSnapshotTask.java  |   4 +-
 .../snapshot/TestSnapshotDescriptionUtils.java  | 113 ++++-
 20 files changed, 1007 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index e1ae0ef..87a82e6 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1226,6 +1226,15 @@ possible configurations would overwhelm and obscure the important.
       to create a name based on what you are restoring.</description>
   </property>
   <property>
+    <name>hbase.snapshot.working.dir</name>
+    <value></value>
+    <description>Location where the snapshotting process will occur. The location of the
+      completed snapshots will not change, but the temporary directory where the snapshot
+      process occurs will be set to this location. This can be a different filesystem from
+      the root directory, to improve performance. See HBASE-21098 for more
+      information.</description>
+  </property>
+  <property>
     <name>hbase.server.compactchecker.interval.multiplier</name>
     <value>1000</value>
     <description>The number that determines how often we scan to see if compaction is necessary.

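For illustration only (not part of the patch): a minimal Java sketch of setting this property programmatically; the bucket and host names below are hypothetical, and the same key/value pair can equally go in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SnapshotWorkingDirConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: root directory on S3, snapshot working
    // directory on HDFS so in-progress snapshots get cheap atomic renames.
    conf.set("hbase.rootdir", "s3a://my-bucket/hbase");
    conf.set("hbase.snapshot.working.dir",
        "hdfs://namenode:8020/hbase-snapshot-tmp");
    System.out.println(conf.get("hbase.snapshot.working.dir"));
  }
}
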
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
index 15fa0c5..dc9394e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
@@ -53,9 +53,12 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
   /**
    * @param snapshot descriptor of the snapshot to take
    * @param masterServices master services provider
+   * @throws IOException if the handler cannot access the filesystem of the snapshot
+   *         temporary directory
    */
   public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
-      final MasterServices masterServices, final SnapshotManager snapshotManager) {
+      final MasterServices masterServices, final SnapshotManager snapshotManager)
+      throws IOException {
     super(snapshot, masterServices, snapshotManager);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index fc58455..f424c38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -50,7 +50,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
   private final ProcedureCoordinator coordinator;
 
   public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
-                                     final SnapshotManager manager) {
+      final SnapshotManager manager) throws IOException {
     super(snapshot, master, manager);
     this.coordinator = manager.getCoordinator();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index bb54fc3..dee5293 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 /**
@@ -77,21 +78,20 @@ public final class MasterSnapshotVerifier {
   private static final Log LOG = LogFactory.getLog(MasterSnapshotVerifier.class);
 
   private SnapshotDescription snapshot;
-  private FileSystem fs;
-  private Path rootDir;
+  private FileSystem workingDirFs;
   private TableName tableName;
   private MasterServices services;
 
   /**
    * @param services services for the master
    * @param snapshot snapshot to check
-   * @param rootDir root directory of the hbase installation.
+   * @param workingDirFs the file system containing the temporary snapshot information
    */
-  public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot, Path rootDir) {
-    this.fs = services.getMasterFileSystem().getFileSystem();
+  public MasterSnapshotVerifier(MasterServices services,
+      SnapshotDescription snapshot, FileSystem workingDirFs) {
+    this.workingDirFs = workingDirFs;
     this.services = services;
     this.snapshot = snapshot;
-    this.rootDir = rootDir;
     this.tableName = TableName.valueOf(snapshot.getTable());
   }
 
@@ -105,7 +105,7 @@ public final class MasterSnapshotVerifier {
    */
   public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
       throws CorruptedSnapshotException, IOException {
-    SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs,
+    SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), workingDirFs,
                                                       snapshotDir, snapshot);
     // verify snapshot info matches
     verifySnapshotDescription(snapshotDir);
@@ -122,7 +122,8 @@ public final class MasterSnapshotVerifier {
    * @param snapshotDir snapshot directory to check
    */
   private void verifySnapshotDescription(Path snapshotDir) throws CorruptedSnapshotException {
-    SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+    SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(workingDirFs,
+        snapshotDir);
     if (!this.snapshot.equals(found)) {
       throw new CorruptedSnapshotException("Snapshot read (" + found
           + ") doesn't equal snapshot we ran (" + snapshot + ").", snapshot);
@@ -195,7 +196,9 @@ public final class MasterSnapshotVerifier {
     }
 
     // Verify Snapshot HFiles
-    SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), fs, manifest);
+    // Requires the root directory file system as HFiles are stored in the root directory
+    SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(),
+        FSUtils.getRootDirFileSystem(services.getConfiguration()), manifest);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index b12debf..bc0aa8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -264,11 +264,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    */
   void resetTempDir() throws IOException {
     // cleanup any existing snapshots.
-    Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
-    if (master.getMasterFileSystem().getFileSystem().exists(tmpdir)) {
-      if (!master.getMasterFileSystem().getFileSystem().delete(tmpdir, true)) {
-        LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
-      }
+    Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+        master.getConfiguration());
+    FileSystem tmpFs = tmpdir.getFileSystem(master.getConfiguration());
+    if (!tmpFs.delete(tmpdir, true)) {
+      LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
     }
   }
 
@@ -419,8 +419,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    */
   private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
       throws HBaseSnapshotException {
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
+        master.getConfiguration());
     TableName snapshotTable =
         TableName.valueOf(snapshot.getTable());
 
@@ -445,12 +445,13 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
 
     try {
+      FileSystem workingDirFS = workingDir.getFileSystem(master.getConfiguration());
       // delete the working directory, since we aren't running the snapshot. Likely leftovers
       // from a failed attempt.
-      fs.delete(workingDir, true);
+      workingDirFS.delete(workingDir, true);
 
       // recreate the working directory for the snapshot
-      if (!fs.mkdirs(workingDir)) {
+      if (!workingDirFS.mkdirs(workingDir)) {
         throw new SnapshotCreationException("Couldn't create working directory (" + workingDir
             + ") for snapshot" , snapshot);
       }
@@ -465,10 +466,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /**
    * Take a snapshot of a disabled table.
    * @param snapshot description of the snapshot to take. Modified to be {@link Type#DISABLED}.
-   * @throws HBaseSnapshotException if the snapshot could not be started
+   * @throws IOException if the snapshot could not be started or the filesystem for the
+   *         snapshot temporary directory could not be determined
    */
   private synchronized void snapshotDisabledTable(SnapshotDescription snapshot)
-      throws HBaseSnapshotException {
+      throws IOException {
     // setup the snapshot
     prepareToTakeSnapshot(snapshot);
 
@@ -484,10 +486,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /**
    * Take a snapshot of an enabled table.
    * @param snapshot description of the snapshot to take.
-   * @throws HBaseSnapshotException if the snapshot could not be started
+   * @throws IOException if the snapshot could not be started or the filesystem for the
+   *         snapshot temporary directory could not be determined
    */
   private synchronized void snapshotEnabledTable(SnapshotDescription snapshot)
-      throws HBaseSnapshotException {
+      throws IOException {
     // setup the snapshot
     prepareToTakeSnapshot(snapshot);
 
@@ -506,16 +509,18 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param handler the snapshot handler
    */
   private synchronized void snapshotTable(SnapshotDescription snapshot,
-      final TakeSnapshotHandler handler) throws HBaseSnapshotException {
+      final TakeSnapshotHandler handler) throws IOException {
     try {
       handler.prepare();
       this.executorService.submit(handler);
       this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
     } catch (Exception e) {
       // cleanup the working directory by trying to delete it from the fs.
-      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
+          master.getConfiguration());
+      FileSystem workingDirFs = workingDir.getFileSystem(master.getConfiguration());
       try {
-        if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
+        if (!workingDirFs.delete(workingDir, true)) {
           LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
               ClientSnapshotDescriptionUtils.toString(snapshot));
         }

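A note on the pattern in resetTempDir() above: the FileSystem is now resolved from the working directory Path itself rather than taken from the master's root filesystem, since the two may now differ. A minimal sketch of that pattern, using a hypothetical local path so it runs standalone (in the motivating deployment this would be an HDFS path while the root sits on S3):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirFileSystemExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Each Path can resolve the FileSystem that serves it, which may
    // differ from the filesystem backing hbase.rootdir.
    Path tmpdir = new Path("file:///tmp/hbase-snapshot-tmp");
    FileSystem tmpFs = tmpdir.getFileSystem(conf);
    // Recursive delete, mirroring resetTempDir() above.
    if (!tmpFs.delete(tmpdir, true)) {
      System.out.println("Couldn't delete working snapshot directory: " + tmpdir);
    }
  }
}
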
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 73b5675..e9d415b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.snapshot;
 
+import com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.HashSet;
@@ -77,7 +78,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
   protected final SnapshotDescription snapshot;
   protected final Configuration conf;
-  protected final FileSystem fs;
+  protected final FileSystem rootFs;
+  protected final FileSystem workingDirFs;
   protected final Path rootDir;
   private final Path snapshotDir;
   protected final Path workingDir;
@@ -95,24 +97,33 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   /**
    * @param snapshot descriptor of the snapshot to take
    * @param masterServices master services provider
+   * @throws IllegalArgumentException if the working snapshot directory set from the
+   *   configuration is inside the root directory but not within the default working directory
+   * @throws IOException if the file system of the working snapshot directory cannot be
+   *   determined
    */
   public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices,
-                             final SnapshotManager snapshotManager) {
+                             final SnapshotManager snapshotManager) throws IOException {
     super(masterServices, EventType.C_M_SNAPSHOT_TABLE);
     assert snapshot != null : "SnapshotDescription must not be null";
     assert masterServices != null : "MasterServices must not be null";
-
     this.master = masterServices;
+    this.conf = this.master.getConfiguration();
+    this.rootDir = this.master.getMasterFileSystem().getRootDir();
+    this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
+    Preconditions.checkArgument(!SnapshotDescriptionUtils.isSubDirectoryOf(workingDir, rootDir) ||
+            SnapshotDescriptionUtils.isWithinDefaultWorkingDir(workingDir, conf),
+        "The working directory " + workingDir + " cannot be in the root directory unless it is "
+            + "within the default working directory");
+
     this.snapshot = snapshot;
     this.snapshotManager = snapshotManager;
     this.snapshotTable = TableName.valueOf(snapshot.getTable());
-    this.conf = this.master.getConfiguration();
-    this.fs = this.master.getMasterFileSystem().getFileSystem();
-    this.rootDir = this.master.getMasterFileSystem().getRootDir();
+    this.rootFs = this.master.getMasterFileSystem().getFileSystem();
     this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-    this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    this.workingDirFs = this.workingDir.getFileSystem(this.conf);
     this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
-    this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
+    this.snapshotManifest = SnapshotManifest.create(conf, rootFs, workingDir, snapshot, monitor);
 
     this.tableLockManager = master.getTableLockManager();
     this.tableLock = this.tableLockManager.writeLock(
@@ -120,7 +131,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
         EventType.C_M_SNAPSHOT_TABLE.toString());
 
     // prepare the verify
-    this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir);
+    this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, workingDirFs);
     // update the running tasks
     this.status = TaskMonitor.get().createStatus(
       "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
@@ -172,7 +183,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       // an external exception that gets captured here.
 
       // write down the snapshot info in the working directory
-      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
+      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
       snapshotManifest.addTableDescriptor(this.htd);
       monitor.rethrowException();
 
@@ -208,7 +219,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       verifier.verifySnapshot(this.workingDir, serverNames);
 
       // complete the snapshot, atomically moving from tmp to .snapshot dir.
-      completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
+      completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs, this.workingDirFs);
       msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
       status.markComplete(msg);
       LOG.info(msg);
@@ -228,7 +239,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       try {
        // if the working dir is still present, the snapshot has failed.
        // If it is present, we delete it.
-        if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
+        if (!workingDirFs.delete(workingDir, true)) {
           LOG.error("Couldn't delete snapshot working directory:" + workingDir);
         }
       } catch (IOException e) {
@@ -250,22 +261,21 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   }
 
   /**
-   * Reset the manager to allow another snapshot to proceed
+   * Reset the manager to allow another snapshot to proceed.
+   * Commits the snapshot process by moving the working snapshot
+   * to its finalized file path.
+   *
+   * @param snapshotDir The file path of the completed snapshots
+   * @param workingDir  The file path of the in-progress snapshots
+   * @param fs The file system of the completed snapshots
+   * @param workingDirFs The file system of the in-progress snapshots
    *
-   * @param snapshotDir final path of the snapshot
-   * @param workingDir directory where the in progress snapshot was built
-   * @param fs {@link FileSystem} where the snapshot was built
    * @throws SnapshotCreationException if the snapshot could not be moved
    * @throws IOException the filesystem could not be reached
    */
-  public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs)
-      throws SnapshotCreationException, IOException {
-    LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to "
-        + snapshotDir);
-    if (!fs.rename(workingDir, snapshotDir)) {
-      throw new SnapshotCreationException("Failed to move working directory(" + workingDir
-          + ") to completed directory(" + snapshotDir + ").");
-    }
+  public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
+      FileSystem workingDirFs) throws SnapshotCreationException, IOException {
+    SnapshotDescriptionUtils.completeSnapshot(snapshotDir, workingDir, fs, workingDirFs, conf);
     finished = true;
   }
 

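The Preconditions check added to the TakeSnapshotHandler constructor above enforces one invariant: the working directory may live anywhere except inside the root directory, unless it is under the default .hbase-snapshot/.tmp location. Below is a sketch of the same expression using the public helpers from this patch; the local paths are hypothetical and chosen so the sketch runs standalone.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;

public class WorkingDirGuardExample {
  static boolean allowed(Path workingDir, Path rootDir, Configuration conf)
      throws Exception {
    // Same expression as the Preconditions.checkArgument above.
    return !SnapshotDescriptionUtils.isSubDirectoryOf(workingDir, rootDir)
        || SnapshotDescriptionUtils.isWithinDefaultWorkingDir(workingDir, conf);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = new Path("file:/tmp/hbase");
    FSUtils.setRootDir(conf, rootDir);
    // Inside the root dir, but within the default working dir: accepted.
    System.out.println(allowed(
        new Path(rootDir, ".hbase-snapshot/.tmp/snap"), rootDir, conf)); // true
    // Inside the root dir, outside the default working dir: rejected.
    System.out.println(allowed(
        new Path(rootDir, "scratch/snap"), rootDir, conf)); // false
  }
}
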
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 2b83395..79505e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3944,7 +3944,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public void addRegionToSnapshot(SnapshotDescription desc,
       ForeignExceptionSnare exnSnare) throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
-    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
 
     SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
                                                         snapshotDir, desc, exnSnare);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 0824189..3b9c3a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -922,10 +922,12 @@ public class ExportSnapshot extends Configured implements Tool {
     FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
     LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString());
 
-    boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false);
+    boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) ||
+        conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
 
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
-    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot);
+    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot,
+        destConf);
     Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
     Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
 
@@ -935,7 +937,7 @@ public class ExportSnapshot extends Configured implements Tool {
       if (skipTmp) {
         needSetOwnerDir = outputSnapshotDir;
       } else {
-        needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot);
+        needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
         if (outputFs.exists(needSetOwnerDir)) {
           needSetOwnerDir = snapshotTmpDir;
         }

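For context on the skipTmp change above: merely configuring hbase.snapshot.working.dir now disables staging through the tmp directory on export. A sketch of the resulting boolean, assuming (this is not shown in the diff) that the private CONF_SKIP_TMP constant holds the key "snapshot.export.skip.tmp":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class ExportSkipTmpExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical custom working directory; its presence alone forces skipTmp.
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
        "hdfs://namenode:8020/hbase-snapshot-tmp");
    // "snapshot.export.skip.tmp" is assumed to be the value of CONF_SKIP_TMP.
    boolean skipTmp = conf.getBoolean("snapshot.export.skip.tmp", false)
        || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
    System.out.println("skipTmp=" + skipTmp); // skipTmp=true
  }
}
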
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cdc1b1f..722bb23 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hbase.snapshot;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
 
@@ -28,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
@@ -112,6 +115,12 @@ public final class SnapshotDescriptionUtils {
   /** Temporary directory under the snapshot directory to store in-progress snapshots */
   public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
 
+  /**
+   * The configuration property that determines the filepath of the snapshot
+   * base working directory
+   */
+  public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
+
 /** This tag will be created in in-progress snapshots */
   public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
   // snapshot operation values
@@ -191,46 +200,52 @@ public final class SnapshotDescriptionUtils {
    * @return the final directory for the completed snapshot
    */
   public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
-    return getCompletedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
+    return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
   }
 
   /**
    * Get the general working directory for snapshots - where they are built, where they are
    * temporarily copied on export, etc.
    * @param rootDir root directory of the HBase installation
+   * @param conf Configuration of the HBase instance
    * @return Path to the snapshot tmp directory, relative to the passed root directory
    */
-  public static Path getWorkingSnapshotDir(final Path rootDir) {
-    return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
+  public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) {
+    return new Path(conf.get(SNAPSHOT_WORKING_DIR,
+        getDefaultWorkingSnapshotDir(rootDir).toString()));
   }
 
   /**
    * Get the directory to build a snapshot, before it is finalized
    * @param snapshot snapshot that will be built
    * @param rootDir root directory of the hbase installation
+   * @param conf Configuration of the HBase instance
    * @return {@link Path} where one can build a snapshot
    */
-  public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir) {
-    return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshot.getName());
+  public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir,
+      Configuration conf) {
+    return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf);
   }
 
   /**
    * Get the directory to build a snapshot, before it is finalized
    * @param snapshotName name of the snapshot
    * @param rootDir root directory of the hbase installation
+   * @param conf Configuration of the HBase instance
    * @return {@link Path} where one can build a snapshot
    */
-  public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir) {
-    return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshotName);
+  public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
+      Configuration conf) {
+    return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
   }
 
   /**
-   * Get the directory to store the snapshot instance
-   * @param snapshotsDir hbase-global directory for storing all snapshots
+   * Get the directory within the given filepath to store the snapshot instance
+   * @param snapshotsDir directory in which to store the snapshot directory
    * @param snapshotName name of the snapshot to take
-   * @return the final directory for the completed snapshot
+   * @return the final directory for the snapshot in the given filepath
    */
-  private static final Path getCompletedSnapshotDir(final Path snapshotsDir, String snapshotName) {
+  private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
     return new Path(snapshotsDir, snapshotName);
   }
 
@@ -243,6 +258,40 @@ public final class SnapshotDescriptionUtils {
   }
 
   /**
+   * Determines if the given workingDir is a subdirectory of the given "root directory"
+   * @param workingDir a directory to check
+   * @param rootDir root directory of the HBase installation
+   * @return true if the given workingDir is a subdirectory of the given root directory,
+   *   false otherwise
+   */
+  public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) {
+    return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
+  }
+
+  /**
+   * Determines if the given workingDir is a subdirectory of the default working snapshot directory
+   * @param workingDir a directory to check
+   * @param conf configuration for the HBase cluster
+   * @return true if the given workingDir is a subdirectory of the default working directory for
+   *   snapshots, false otherwise
+   */
+  public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf)
+      throws IOException {
+    Path defaultWorkingDir = getDefaultWorkingSnapshotDir(FSUtils.getRootDir(conf));
+    return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
+  }
+
+  /**
+   * Get the default working directory for snapshots - where they are built, where they are
+   * temporarily copied on export, etc.
+   * @param rootDir root directory of the HBase installation
+   * @return Path to the default snapshot tmp directory, relative to the passed root directory
+   */
+  private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
+    return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
+  }
+
+  /**
    * Convert the passed snapshot description into a 'full' snapshot description based on default
    * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
    * supplied to their default values.
@@ -346,25 +395,70 @@ public final class SnapshotDescriptionUtils {
   /**
    * Move the finished snapshot to its final, publicly visible directory - this marks the snapshot
    * as 'complete'.
-   * @param snapshot description of the snapshot being tabken
-   * @param rootdir root directory of the hbase installation
-   * @param workingDir directory where the in progress snapshot was built
-   * @param fs {@link FileSystem} where the snapshot was built
+   * @param snapshotDir The file path of the completed snapshots
+   * @param workingDir The file path of the in-progress snapshots
+   * @param fs {@link FileSystem} The file system of the completed snapshots
+   * @param workingDirFs The file system of the in-progress snapshots
    * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if the
    * snapshot could not be moved
    * @throws IOException the filesystem could not be reached
    */
-  public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
-      FileSystem fs) throws SnapshotCreationException, IOException {
-    Path finishedDir = getCompletedSnapshotDir(snapshot, rootdir);
+  public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
+      FileSystem workingDirFs, Configuration conf) throws SnapshotCreationException, IOException {
     LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to "
-        + finishedDir);
-    if (!fs.rename(workingDir, finishedDir)) {
-      throw new SnapshotCreationException("Failed to move working directory(" + workingDir
-          + ") to completed directory(" + finishedDir + ").", snapshot);
+        + snapshotDir);
+    if (!workingDirFs.exists(workingDir)) {
+      throw new IOException("Failed to move nonexistent snapshot working directory "
+          + workingDir + " to " + snapshotDir);
+    }
+    // If the working and completed snapshot directories are on the same file system, attempt
+    // to rename the working snapshot directory to the completed location. If that fails,
+    // or the file systems differ, attempt to copy the directory over, throwing an exception
+    // if this fails
+    URI workingURI = workingDirFs.getUri();
+    URI rootURI = fs.getUri();
+
+    if ((shouldSkipRenameSnapshotDirectories(workingURI, rootURI)
+        || !fs.rename(workingDir, snapshotDir))
+         && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) {
+      throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
+          + ") to completed directory(" + snapshotDir + ").");
     }
   }
 
+  @VisibleForTesting
+  static boolean shouldSkipRenameSnapshotDirectories(URI workingURI, URI rootURI) {
+    // check scheme, e.g. file, hdfs
+    if (workingURI.getScheme() == null &&
+        (rootURI.getScheme() != null && !rootURI.getScheme().equalsIgnoreCase("file"))) {
+      return true;
+    }
+    if (workingURI.getScheme() != null && !workingURI.getScheme().equals(rootURI.getScheme())) {
+      return true;
+    }
+
+    // check Authority, e.g. localhost:port
+    if (workingURI.getAuthority() == null && rootURI.getAuthority() != null) {
+      return true;
+    }
+    if (workingURI.getAuthority() != null &&
+        !workingURI.getAuthority().equals(rootURI.getAuthority())) {
+      return true;
+    }
+
+    // check UGI/userInfo
+    if (workingURI.getUserInfo() == null && rootURI.getUserInfo() != null) {
+      return true;
+    }
+    if (workingURI.getUserInfo() != null &&
+        !workingURI.getUserInfo().equals(rootURI.getUserInfo())) {
+      return true;
+    }
+
+    return false;
+  }
+
   /**
    * Check if the user is this table snapshot's owner
    * @param snapshot the table snapshot description

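To summarize the completeSnapshot() flow above: when the working and root filesystems are URI-compatible, a cheap rename is attempted first, and every other case falls back to a full copy. Since shouldSkipRenameSnapshotDirectories is package-private, the sketch below reimplements its comparison standalone for illustration; it mirrors the checks in the patch but is not the shipped code.

import java.net.URI;
import java.util.Objects;

public class RenameVsCopyExample {
  // Rename is only attempted when scheme, authority and user info all
  // match; a scheme-less working URI is treated as local and therefore
  // only compatible with a local (file) or scheme-less root URI.
  static boolean shouldSkipRename(URI workingURI, URI rootURI) {
    if (workingURI.getScheme() == null && rootURI.getScheme() != null
        && !rootURI.getScheme().equalsIgnoreCase("file")) {
      return true;
    }
    if (workingURI.getScheme() != null
        && !workingURI.getScheme().equals(rootURI.getScheme())) {
      return true;
    }
    return !Objects.equals(workingURI.getAuthority(), rootURI.getAuthority())
        || !Objects.equals(workingURI.getUserInfo(), rootURI.getUserInfo());
  }

  public static void main(String[] args) {
    // Same filesystem: rename is attempted (prints false = do not skip).
    System.out.println(shouldSkipRename(
        URI.create("hdfs://namenode:8020/a"), URI.create("hdfs://namenode:8020/b")));
    // HDFS working dir against an S3 root dir: fall back to copy (prints true).
    System.out.println(shouldSkipRename(
        URI.create("hdfs://namenode:8020/a"), URI.create("s3a://bucket/b")));
  }
}
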
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 7822fa4..8a73773 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -79,18 +79,29 @@ public final class SnapshotManifest {
   private final ForeignExceptionSnare monitor;
   private final Configuration conf;
   private final Path workingDir;
-  private final FileSystem fs;
+  private final FileSystem rootFs;
+  private final FileSystem workingDirFs;
   private int manifestSizeLimit;
 
-  private SnapshotManifest(final Configuration conf, final FileSystem fs,
+  /**
+   *
+   * @param conf configuration for the HBase setup
+   * @param rootFs root filesystem containing HFiles
+   * @param workingDir file path where the manifest should be located
+   * @param desc description of the snapshot being taken
+   * @param monitor monitor of foreign exceptions
+   * @throws IOException if the working directory file system cannot be
+   *                     determined from the configuration
+   */
+  private SnapshotManifest(final Configuration conf, final FileSystem rootFs,
       final Path workingDir, final SnapshotDescription desc,
-      final ForeignExceptionSnare monitor) {
+      final ForeignExceptionSnare monitor) throws IOException {
     this.monitor = monitor;
     this.desc = desc;
     this.workingDir = workingDir;
     this.conf = conf;
-    this.fs = fs;
-
+    this.rootFs = rootFs;
+    this.workingDirFs = this.workingDir.getFileSystem(this.conf);
     this.manifestSizeLimit = conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
   }
 
@@ -109,7 +120,7 @@ public final class SnapshotManifest {
    */
   public static SnapshotManifest create(final Configuration conf, final FileSystem fs,
       final Path workingDir, final SnapshotDescription desc,
-      final ForeignExceptionSnare monitor) {
+      final ForeignExceptionSnare monitor) throws IOException {
     return new SnapshotManifest(conf, fs, workingDir, desc, monitor);
 
   }
@@ -152,9 +163,9 @@ public final class SnapshotManifest {
   private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION:
-        return new SnapshotManifestV1.ManifestBuilder(conf, fs, workingDir);
+        return new SnapshotManifestV1.ManifestBuilder(conf, rootFs, workingDir);
       case SnapshotManifestV2.DESCRIPTOR_VERSION:
-        return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir);
+        return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir);
       default:
         throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc);
     }
@@ -223,7 +234,7 @@ public final class SnapshotManifest {
       throws IOException {
 
     // Open the RegionFS
-    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
+    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
         tableDir, regionInfo, true);
     monitor.rethrowException();
 
@@ -283,11 +294,11 @@ public final class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
-            SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+            SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc);
         } finally {
           tpool.shutdown();
         }
@@ -304,9 +315,10 @@ public final class SnapshotManifest {
           List<SnapshotRegionManifest> v1Regions, v2Regions;
           ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
           try {
-            v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
-            v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
-                manifestSizeLimit);
+            v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs,
+                workingDir, desc);
+            v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs,
+                workingDir, desc, manifestSizeLimit);
           } catch (InvalidProtocolBufferException e) {
             throw new CorruptedSnapshotException("unable to parse region manifest " +
                 e.getMessage(), e);
@@ -380,7 +392,7 @@ public final class SnapshotManifest {
       Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(conf, fs, rootDir)
+      new FSTableDescriptors(conf, workingDirFs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, htd, false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
@@ -397,9 +409,10 @@ public final class SnapshotManifest {
     List<SnapshotRegionManifest> v1Regions, v2Regions;
     ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
     try {
-      v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
-      v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
-          manifestSizeLimit);
+      v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs,
+          workingDir, desc);
+      v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs,
+          workingDir, desc, manifestSizeLimit);
     } finally {
       tpool.shutdown();
     }
@@ -429,12 +442,12 @@ public final class SnapshotManifest {
     // them we will get the same information.
     if (v1Regions != null && v1Regions.size() > 0) {
       for (SnapshotRegionManifest regionManifest: v1Regions) {
-        SnapshotManifestV1.deleteRegionManifest(fs, workingDir, regionManifest);
+        SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
       }
     }
     if (v2Regions != null && v2Regions.size() > 0) {
       for (SnapshotRegionManifest regionManifest: v2Regions) {
-        SnapshotManifestV2.deleteRegionManifest(fs, workingDir, regionManifest);
+        SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
       }
     }
   }
@@ -444,7 +457,7 @@ public final class SnapshotManifest {
    */
   private void writeDataManifest(final SnapshotDataManifest manifest)
       throws IOException {
-    FSDataOutputStream stream = fs.create(new Path(workingDir, DATA_MANIFEST_NAME));
+    FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME));
     try {
       manifest.writeTo(stream);
     } finally {
@@ -458,7 +471,7 @@ public final class SnapshotManifest {
   private SnapshotDataManifest readDataManifest() throws IOException {
     FSDataInputStream in = null;
     try {
-      in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME));
+      in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME));
       CodedInputStream cin = CodedInputStream.newInstance(in);
       cin.setSizeLimit(manifestSizeLimit);
       return SnapshotDataManifest.parseFrom(cin);

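The SnapshotManifest split above means two filesystems are in play at once: manifest data is read and written through workingDirFs, while the HFiles the manifest references are resolved through the root filesystem. A minimal sketch of obtaining both, with hypothetical local paths so it runs standalone (in the motivating deployment the root would be on S3 and the working directory on HDFS):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;

public class ManifestFileSystemsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FSUtils.setRootDir(conf, new Path("file:/tmp/hbase"));
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
        "file:/tmp/hbase-snapshot-tmp");

    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(
        FSUtils.getRootDir(conf), conf);
    FileSystem workingDirFs = workingDir.getFileSystem(conf); // manifest I/O
    FileSystem rootFs = FSUtils.getRootDirFileSystem(conf);   // HFile access
    System.out.println(workingDir + " served by " + workingDirFs.getUri()
        + "; root served by " + rootFs.getUri());
  }
}
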
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index a5afb91..74aaa4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -66,17 +66,20 @@ public final class SnapshotManifestV1 {
                                                           HRegionFileSystem, Path> {
     private final Configuration conf;
     private final Path snapshotDir;
-    private final FileSystem fs;
+    private final FileSystem rootFs;
+    private final FileSystem workingDirFs;
 
-    public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+    public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
+        final Path snapshotDir) throws IOException {
       this.snapshotDir = snapshotDir;
       this.conf = conf;
-      this.fs = fs;
+      this.rootFs = rootFs;
+      this.workingDirFs = snapshotDir.getFileSystem(conf);
     }
 
     public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
       HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        fs, snapshotDir, regionInfo);
+        workingDirFs, snapshotDir, regionInfo);
       return snapshotRegionFs;
     }
 
@@ -97,13 +100,13 @@ public final class SnapshotManifestV1 {
       boolean success = true;
       if (storeFile.isReference()) {
         // write the Reference object to the snapshot
-        storeFile.getReference().write(fs, referenceFile);
+        storeFile.getReference().write(workingDirFs, referenceFile);
       } else {
         // create "reference" to this store file.  It is intentionally an empty file -- all
         // necessary information is captured by its fs location and filename.  This allows us to
         // only figure out what needs to be done via a single nn operation (instead of having to
         // open and read the files as well).
-        success = fs.createNewFile(referenceFile);
+        success = workingDirFs.createNewFile(referenceFile);
       }
       if (!success) {
         throw new IOException("Failed to create reference file:" + referenceFile);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
index 3b918a0..433c003 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
@@ -68,12 +68,13 @@ public final class SnapshotManifestV2 {
                     SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> {
     private final Configuration conf;
     private final Path snapshotDir;
-    private final FileSystem fs;
+    private final FileSystem rootFs;
 
-    public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+    public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
+        final Path snapshotDir) {
       this.snapshotDir = snapshotDir;
       this.conf = conf;
-      this.fs = fs;
+      this.rootFs = rootFs;
     }
 
     public SnapshotRegionManifest.Builder regionOpen(final HRegionInfo regionInfo) {
@@ -85,9 +86,11 @@ public final class SnapshotManifestV2 {
     public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException {
       // we should ensure the snapshot dir exists; it may have been deleted by the master
       // see HBASE-16464
-      if (fs.exists(snapshotDir)) {
+      FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf);
+      if (workingDirFs.exists(snapshotDir)) {
         SnapshotRegionManifest manifest = region.build();
-        FSDataOutputStream stream = fs.create(getRegionManifestPath(snapshotDir, manifest));
+        FSDataOutputStream stream = workingDirFs.create(
+            getRegionManifestPath(snapshotDir, manifest));
         try {
           manifest.writeTo(stream);
         } finally {
@@ -120,7 +123,7 @@ public final class SnapshotManifestV2 {
       if (storeFile.isReference()) {
         sfManifest.setReference(storeFile.getReference().convert());
       }
-      sfManifest.setFileSize(storeFile.getReferencedFileStatus(fs).getLen());
+      sfManifest.setFileSize(storeFile.getReferencedFileStatus(rootFs).getLen());
       family.addStoreFiles(sfManifest.build());
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
new file mode 100644
index 0000000..5ac2f0f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests that snapshot functionality still works when the temporary snapshot
+ * directory is on the same file system as the root directory.
+ * <p>
+ * This is an end-to-end test of the snapshot utility.
+ */
+@Category(LargeTests.class)
+public class TestSnapshotDFSTemporaryDirectory
+    extends TestSnapshotTemporaryDirectory {
+
+  /**
+   * Setup the config for the cluster
+   *
+   * @throws Exception on failure
+   */
+  @BeforeClass public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+    admin = UTIL.getHBaseAdmin();
+  }
+
+  private static void setupConf(Configuration conf) throws IOException {
+    // disable the ui
+    conf.setInt("hbase.regionsever.info.port", -1);
+    // change the flush size to a small amount, regulating number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // so make sure we get a compaction when doing a load, but keep around some
+    // files in the store
+    conf.setInt("hbase.hstore.compaction.min", 10);
+    conf.setInt("hbase.hstore.compactionThreshold", 10);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+        ConstantSizeRegionSplitPolicy.class.getName());
+
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, UTIL.getDefaultRootDirPath().toString()
+        + Path.SEPARATOR + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir"
+        + Path.SEPARATOR);
+  }
+}

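This subclass keeps the working directory on the same mini-DFS as the root; the
production case this feature targets is the inverse, staging snapshot writes off
an object store. A hedged sketch of that configuration, with an illustrative
bucket and namenode; the key is whatever value
SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR holds:

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.rootdir", "s3a://my-bucket/hbase");
  // Stage snapshot writes on HDFS, then move the finished snapshot under the
  // S3 root on completion.
  conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
      "hdfs://namenode:8020/hbase-snapshot-tmp");
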
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
new file mode 100644
index 0000000..cbf02f2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.UUID;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests that snapshot functionality still works when the temporary snapshot
+ * directory is on a different file system than the root directory.
+ * <p>
+ * This is an end-to-end test of the snapshot utility.
+ */
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestSnapshotTemporaryDirectory {
+
+  @Parameterized.Parameters public static Iterable<Integer> data() {
+    return Arrays
+        .asList(SnapshotManifestV1.DESCRIPTOR_VERSION, SnapshotManifestV2.DESCRIPTOR_VERSION);
+  }
+
+  @Parameterized.Parameter public int manifestVersion;
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotTemporaryDirectory.class);
+  protected static final int NUM_RS = 2;
+  protected static String TEMP_DIR =
+      Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR + UUID.randomUUID().toString();
+
+  protected static Admin admin;
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  protected static final String STRING_TABLE_NAME = "test";
+  protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
+
+  /**
+   * Setup the config for the cluster
+   *
+   * @throws Exception on failure
+   */
+  @BeforeClass public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+    admin = UTIL.getHBaseAdmin();
+  }
+
+  private static void setupConf(Configuration conf) {
+    // disable the ui
+    conf.setInt("hbase.regionsever.info.port", -1);
+    // change the flush size to a small amount, regulating number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // so make sure we get a compaction when doing a load, but keep around some
+    // files in the store
+    conf.setInt("hbase.hstore.compaction.min", 10);
+    conf.setInt("hbase.hstore.compactionThreshold", 10);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+        ConstantSizeRegionSplitPolicy.class.getName());
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
+  }
+
+  @Before public void setup() throws Exception {
+    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
+    htd.setRegionReplication(getNumReplicas());
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+  }
+
+  protected int getNumReplicas() {
+    return 1;
+  }
+
+  @After public void tearDown() throws Exception {
+    UTIL.deleteTable(TABLE_NAME);
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+  }
+
+  @AfterClass public static void cleanupTest() {
+    try {
+      UTIL.shutdownMiniCluster();
+      FileUtils.deleteDirectory(new File(TEMP_DIR));
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Test(timeout = 180000) public void testRestoreDisabledSnapshot()
+      throws IOException, InterruptedException {
+    long tid = System.currentTimeMillis();
+    TableName tableName = TableName.valueOf("testtb-" + tid);
+    byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+    byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+    int snapshot0Rows;
+    int snapshot1Rows;
+
+    // create Table and disable it
+    SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
+    admin.disableTable(tableName);
+
+    // take an empty snapshot
+    takeSnapshot(tableName, Bytes.toString(emptySnapshot), true);
+
+    // enable table and insert data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot0Rows = UTIL.countRows(table);
+    }
+    admin.disableTable(tableName);
+
+    // take a snapshot
+    takeSnapshot(tableName, Bytes.toString(snapshotName0), true);
+
+    // enable table and insert more data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot1Rows = UTIL.countRows(table);
+    }
+
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    admin.disableTable(tableName);
+    takeSnapshot(tableName, Bytes.toString(snapshotName1), true);
+
+    // Restore from snapshot-0
+    admin.restoreSnapshot(snapshotName0);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from emptySnapshot
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(emptySnapshot);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    UTIL.deleteTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+  }
+
+  @Test(timeout = 180000) public void testRestoreEnabledSnapshot()
+      throws IOException, InterruptedException {
+    long tid = System.currentTimeMillis();
+    TableName tableName = TableName.valueOf("testtb-" + tid);
+    byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+    byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+    int snapshot0Rows;
+    int snapshot1Rows;
+
+    // create Table
+    SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
+
+    // take an empty snapshot
+    takeSnapshot(tableName, Bytes.toString(emptySnapshot), false);
+
+    // Insert data
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot0Rows = UTIL.countRows(table);
+    }
+
+    // take a snapshot
+    takeSnapshot(tableName, Bytes.toString(snapshotName0), false);
+
+    // Insert more data
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot1Rows = UTIL.countRows(table);
+    }
+
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    takeSnapshot(tableName, Bytes.toString(snapshotName1), false);
+
+    // Restore from snapshot-0
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName0);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from emptySnapshot
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(emptySnapshot);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    UTIL.deleteTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+  }
+
+  /**
+   * Test snapshotting a table that is offline
+   *
+   * @throws Exception if snapshot does not complete successfully
+   */
+  @Test(timeout = 300000) public void testOfflineTableSnapshot() throws Exception {
+    Admin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // put some stuff in the table
+    Table table = UTIL.getConnection().getTable(TABLE_NAME);
+    UTIL.loadTable(table, TEST_FAM, false);
+
+    LOG.debug("FS state before disable:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+    // XXX if this is flaky, might want to consider using the async version and looping as
+    // disableTable can succeed and still timeout.
+    admin.disableTable(TABLE_NAME);
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    // take a snapshot of the disabled table
+    final String SNAPSHOT_NAME = "offlineTableSnapshot";
+    byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
+    takeSnapshot(TABLE_NAME, SNAPSHOT_NAME, true);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots =
+        SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
+
+    // make sure it's a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    SnapshotTestingUtils
+        .confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir, admin, fs);
+
+    admin.deleteSnapshot(snapshot);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  /**
+   * Tests that snapshot has correct contents by taking snapshot, cloning it, then affirming
+   * the contents of the original and cloned table match
+   *
+   * @throws Exception if snapshot does not complete successfully
+   */
+  @Test(timeout = 180000) public void testSnapshotCloneContents() throws Exception {
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // put some stuff in the table
+    Table table = UTIL.getConnection().getTable(TABLE_NAME);
+    UTIL.loadTable(table, TEST_FAM);
+    table.close();
+
+    String snapshot1 = "TableSnapshot1";
+    takeSnapshot(TABLE_NAME, snapshot1, false);
+    LOG.debug("Snapshot1 completed.");
+
+    TableName clone = TableName.valueOf("Table1Clone");
+    admin.cloneSnapshot(snapshot1, clone, false);
+
+    Scan original = new Scan();
+    Scan cloned = new Scan();
+    ResultScanner originalScan = admin.getConnection().getTable(TABLE_NAME).getScanner(original);
+    ResultScanner clonedScan =
+        admin.getConnection().getTable(TableName.valueOf("Table1Clone")).getScanner(cloned);
+
+    Iterator<Result> i = originalScan.iterator();
+    Iterator<Result> i2 = clonedScan.iterator();
+    assertTrue(i.hasNext());
+    while (i.hasNext()) {
+      assertTrue(i2.hasNext());
+      assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})),
+          Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {})));
+    }
+    assertFalse(i2.hasNext());
+    admin.deleteSnapshot(snapshot1);
+    UTIL.deleteTable(clone);
+    admin.close();
+  }
+
+  @Test(timeout = 180000) public void testOfflineTableSnapshotWithEmptyRegion() throws Exception {
+    // test with an empty table with one region
+
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    LOG.debug("FS state before disable:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+    admin.disableTable(TABLE_NAME);
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    // take a snapshot of the disabled table
+    byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegion");
+    takeSnapshot(TABLE_NAME, Bytes.toString(snapshot), true);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots =
+        SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
+
+    // make sure it's a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
+    List<byte[]> nonEmptyCfs = Lists.newArrayList();
+    SnapshotTestingUtils
+        .confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs, rootDir,
+            admin, fs);
+
+    admin.deleteSnapshot(snapshot);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  // Ensures that the snapshot is transferred to the proper completed snapshot directory
+  @Test(timeout = 180000) public void testEnsureTemporaryDirectoryTransfer() throws Exception {
+    Admin admin = null;
+    TableName tableName2 = TableName.valueOf("testListTableSnapshots");
+    try {
+      admin = UTIL.getHBaseAdmin();
+
+      HTableDescriptor htd = new HTableDescriptor(tableName2);
+      UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+
+      String table1Snapshot1 = "Table1Snapshot1";
+      takeSnapshot(TABLE_NAME, table1Snapshot1, false);
+      LOG.debug("Snapshot1 completed.");
+
+      String table1Snapshot2 = "Table1Snapshot2";
+      takeSnapshot(TABLE_NAME, table1Snapshot2, false);
+      LOG.debug("Snapshot2 completed.");
+
+      String table2Snapshot1 = "Table2Snapshot1";
+      takeSnapshot(TABLE_NAME, table2Snapshot1, false);
+      LOG.debug("Table2Snapshot1 completed.");
+
+      List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", ".*");
+      List<String> listTableSnapshotNames = new ArrayList<String>();
+      assertEquals(3, listTableSnapshots.size());
+      for (SnapshotDescription s : listTableSnapshots) {
+        listTableSnapshotNames.add(s.getName());
+      }
+      assertTrue(listTableSnapshotNames.contains(table1Snapshot1));
+      assertTrue(listTableSnapshotNames.contains(table1Snapshot2));
+      assertTrue(listTableSnapshotNames.contains(table2Snapshot1));
+    } finally {
+      if (admin != null) {
+        try {
+          admin.deleteSnapshots("Table.*");
+        } catch (SnapshotDoesNotExistException ignore) {
+        }
+        if (admin.tableExists(tableName2)) {
+          UTIL.deleteTable(tableName2);
+        }
+        admin.close();
+      }
+    }
+  }
+
+  private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled)
+      throws IOException {
+    Type type = disabled ? Type.DISABLED : Type.FLUSH;
+    SnapshotDescription desc = SnapshotDescription.newBuilder()
+        .setTable(tableName.getNameAsString())
+        .setName(snapshotName)
+        .setVersion(manifestVersion)
+        .setType(type)
+        .build();
+    admin.snapshot(desc);
+  }
+}
\ No newline at end of file

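A quick sanity check in the same style as the assertions above, confirming the
configured working directory resolves to the local filesystem while the cluster
root stays on the mini-DFS; a sketch only, assuming the file:// value set in
setupConf():

  Configuration c = UTIL.getConfiguration();
  Path workingDir = new Path(c.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR));
  // setupConf() points the working dir at file://<TEMP_DIR>/.tmpdir/
  assertEquals("file", workingDir.getFileSystem(c).getUri().getScheme());
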
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
new file mode 100644
index 0000000..c720b8e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestSnapshotTemporaryDirectoryWithRegionReplicas
+    extends TestSnapshotTemporaryDirectory {
+
+  @Override
+  protected int getNumReplicas() {
+    return 3;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
index 7d618c3..f509771 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
@@ -136,7 +136,8 @@ public class TestSnapshotHFileCleaner {
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+          TEST_UTIL.getConfiguration()), true);
     }
   }
 
@@ -163,7 +164,8 @@ public class TestSnapshotHFileCleaner {
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+          TEST_UTIL.getConfiguration()), true);
     }
   }
 

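The cleaner tests now thread the Configuration through because the working
directory is no longer a fixed child of the root. A plausible sketch of what
the new getWorkingSnapshotDir(rootDir, conf) overload resolves to, inferred
from the call sites in this patch rather than quoted from it:

  // Prefer the configured working dir; otherwise fall back to the classic
  // <rootDir>/.hbase-snapshot/.tmp location on the root filesystem.
  public static Path getWorkingSnapshotDir(Path rootDir, Configuration conf) {
    String configured = conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR);
    if (configured != null) {
      return new Path(configured);
    }
    return new Path(new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME), ".tmp");
  }
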
http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 9847bc4..6820019 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -499,8 +499,8 @@ public final class SnapshotTestingUtils {
         this.htd = htd;
         this.desc = desc;
         this.tableRegions = tableRegions;
-        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
-        new FSTableDescriptors(conf)
+        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
+        new FSTableDescriptors(conf, snapshotDir.getFileSystem(conf), rootDir)
           .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
       }
 
@@ -624,8 +624,11 @@ public final class SnapshotTestingUtils {
         SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
         manifest.addTableDescriptor(htd);
         manifest.consolidate();
-        SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir, fs);
-        snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+        Path workingDir = snapshotDir;
+        Path completedSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+        SnapshotDescriptionUtils.completeSnapshot(completedSnapshotDir, workingDir, fs,
+            workingDir.getFileSystem(conf), conf);
+        snapshotDir = completedSnapshotDir;
         return snapshotDir;
       }
 
@@ -679,8 +682,8 @@ public final class SnapshotTestingUtils {
         .setVersion(version)
         .build();
 
-      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
-      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
+      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, workingDir.getFileSystem(conf));
       return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
new file mode 100644
index 0000000..c8e0266
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.util.UUID;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class})
+public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot {
+
+  protected static String TEMP_DIR = Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR
+      + UUID.randomUUID().toString();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    setUpBaseConf(TEST_UTIL.getConfiguration());
+    TEST_UTIL.startMiniCluster(3);
+    TEST_UTIL.startMiniMapReduceCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TestExportSnapshot.tearDownAfterClass();
+    FileUtils.deleteDirectory(new File(TEMP_DIR));
+  }
+
+  public static void setUpBaseConf(Configuration conf) {
+    TestExportSnapshot.setUpBaseConf(conf);
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da4697b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
index 403b1e6..bd361ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
@@ -119,7 +119,7 @@ public class TestRegionSnapshotTask {
 
     final HRegion region = spy(hRegions.get(0));
 
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     final SnapshotManifest manifest =
         SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
     manifest.addTableDescriptor(table.getTableDescriptor());
@@ -164,7 +164,7 @@ public class TestRegionSnapshotTask {
   private void addRegionToSnapshot(HBaseProtos.SnapshotDescription snapshot,
       HRegion region, SnapshotManifest manifest) throws Exception {
     LOG.info("Adding region to snapshot: " + region.getRegionInfo().getRegionNameAsString());
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     SnapshotManifest.RegionVisitor visitor = createRegionVisitorWithDelay(snapshot, workingDir);
     manifest.addRegion(region, visitor);
     LOG.info("Added the region to snapshot: " + region.getRegionInfo().getRegionNameAsString());


[3/3] hbase git commit: HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

Posted by ap...@apache.org.
HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

Backport to branch-1

Signed-off-by: Duo Zhang <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/137423d7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/137423d7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/137423d7

Branch: refs/heads/branch-1.4
Commit: 137423d70341eb736493ce82f917e9f29d979e61
Parents: 8da4697
Author: Andrew Purtell <ap...@apache.org>
Authored: Mon Oct 15 10:53:28 2018 +0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon Oct 15 13:58:35 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/util/Random64.java  | 149 +++++++++++++++++++
 .../hadoop/hbase/chaos/actions/Action.java      |  17 +--
 .../test/IntegrationTestBigLinkedList.java      |  79 +++++++---
 3 files changed, 216 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/137423d7/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
new file mode 100644
index 0000000..95d5606
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
@@ -0,0 +1,149 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import com.google.common.base.Preconditions;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ *
+ * An instance of this class is used to generate a stream of
+ * pseudorandom numbers. The class uses a 64-bit seed, which is
+ * modified using a linear congruential formula.
+ *
+ * See https://en.wikipedia.org/wiki/Linear_congruential_generator
+ */
+@InterfaceAudience.Private
+public class Random64 {
+
+  private static final long multiplier = 6364136223846793005L;
+  private static final long addend = 1442695040888963407L;
+
+  private static final AtomicLong seedUniquifier =
+      new AtomicLong(8682522807148012L);
+
+  private long seed;
+
+  /**
+   * Copy from {@link Random#seedUniquifier()}
+   */
+  private static long seedUniquifier() {
+    for (; ; ) {
+      long current = seedUniquifier.get();
+      long next = current * 181783497276652981L;
+      if (seedUniquifier.compareAndSet(current, next)) {
+        return next;
+      }
+    }
+  }
+
+  public Random64() {
+    this(seedUniquifier() ^ System.nanoTime());
+  }
+
+  public Random64(long seed) {
+    this.seed = seed;
+  }
+
+  public long nextLong() {
+    return next64(64);
+  }
+
+  public void nextBytes(byte[] bytes) {
+    for (int i = 0, len = bytes.length; i < len;) {
+      // We treat the seed as an unsigned long, hence '>>>' instead of '>>'.
+      for (long rnd = nextLong(), n = Math.min(len - i, Long.SIZE / Byte.SIZE);
+           n-- > 0; rnd >>>= Byte.SIZE) {
+        bytes[i++] = (byte) rnd;
+      }
+    }
+  }
+
+  private long next64(int bits) {
+    seed = seed * multiplier + addend;
+    return seed >>> (64 - bits);
+  }
+
+
+  /**
+   * Random64 is a pseudorandom generator (LCG), so two instances seeded with
+   * the same value produce the same sequence. This main method tests how many
+   * nextLong() calls it takes before the generator revisits an earlier state.
+   *
+   * We need not save every generated number (that would be too large): saving
+   * one value every 100000 calls is enough, because after a state collision
+   * the next 100000 consecutive calls must reproduce a saved value.
+   *
+   */
+  public static void main(String[] args) {
+    long defaultTotalTestCnt = 1000000000000L; // 1 trillion
+
+    if (args.length == 1) {
+      defaultTotalTestCnt = Long.parseLong(args[0]);
+    }
+
+    Preconditions.checkArgument(defaultTotalTestCnt > 0, "totalTestCnt <= 0");
+
+    final int precision = 100000;
+    final long totalTestCnt = defaultTotalTestCnt + precision;
+    final int reportPeriod = 100 * precision;
+    final long startTime = System.currentTimeMillis();
+
+    System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);
+
+    Random64 rand = new Random64();
+    Set<Long> longSet = new HashSet<>();
+
+    for (long cnt = 1; cnt <= totalTestCnt; cnt++) {
+      final long randLong = rand.nextLong();
+
+      if (longSet.contains(randLong)) {
+        System.err.println("Conflict! count=" + cnt);
+        System.exit(1);
+      }
+
+      if (cnt % precision == 0) {
+        if (!longSet.add(randLong)) {
+          System.err.println("Conflict! count=" + cnt);
+          System.exit(1);
+        }
+
+        if (cnt % reportPeriod == 0) {
+          long cost = System.currentTimeMillis() - startTime;
+          long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
+          System.out.println(
+            String.format(
+              "Progress: %.3f%%, remaining %d minutes",
+              100.0 * cnt / totalTestCnt, remainingMs / 60000
+            )
+          );
+        }
+      }
+
+    }
+
+    System.out.println("No collision!");
+  }
+
+}

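Random64 is meant as a collision-free replacement where java.util.Random falls
short: Random derives longs from 48 bits of state and cannot emit all 2^64
values, while this LCG's constants satisfy the Hull-Dobell conditions, giving a
full period of 2^64 with no repeated value before every state is visited. A
short usage sketch:

  Random64 rand = new Random64();
  long key = rand.nextLong();   // draw a full-range 64-bit value
  byte[] row = new byte[16];
  rand.nextBytes(row);          // fill an arbitrary-length buffer
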
http://git-wip-us.apache.org/repos/asf/hbase/blob/137423d7/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
index fe140e2..1b842be 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
@@ -21,8 +21,10 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.commons.logging.Log;
@@ -117,16 +119,13 @@ public class Action {
       return new ServerName [] {};
     }
     ServerName master = clusterStatus.getMaster();
-    if (master == null || !regionServers.contains(master)) {
-      return regionServers.toArray(new ServerName[count]);
-    }
-    if (count == 1) {
-      return new ServerName [] {};
-    }
-    ArrayList<ServerName> tmp = new ArrayList<ServerName>(count);
+    Set<ServerName> masters = new HashSet<ServerName>();
+    masters.add(master);
+    masters.addAll(clusterStatus.getBackupMasters());
+    ArrayList<ServerName> tmp = new ArrayList<>(count);
     tmp.addAll(regionServers);
-    tmp.remove(master);
-    return tmp.toArray(new ServerName[count-1]);
+    tmp.removeAll(masters);
+    return tmp.toArray(new ServerName[tmp.size()]);
   }
 
   protected void killMaster(ServerName server) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/137423d7/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 0e3b198..b48f1a0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.test;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.FileNotFoundException;
@@ -35,7 +37,6 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -56,20 +57,18 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.IntegrationTestBase;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.testclassification.IntegrationTests;
-import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionConfiguration;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -77,6 +76,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ScannerCallable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
@@ -85,8 +86,10 @@ import org.apache.hadoop.hbase.mapreduce.WALPlayer;
 import org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy;
 import org.apache.hadoop.hbase.regionserver.FlushPolicyFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Random64;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.io.BytesWritable;
@@ -119,8 +122,6 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Sets;
-
 /**
  * This is an integration test borrowed from goraci, written by Keith Turner,
 * which is in turn inspired by the Accumulo test called continuous ingest (ci).
@@ -268,6 +269,15 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     public static final String MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY =
         "generator.multiple.columnfamilies";
 
+    /**
+     * Set this configuration if you want to scale up the size of test data quickly.
+     * <p>
+     * $ ./bin/hbase org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList
+     * -Dgenerator.big.family.value.size=1024 generator 1 10 output
+     */
+    public static final String BIG_FAMILY_VALUE_SIZE_KEY = "generator.big.family.value.size";
+
+
     public static enum Counts {
       SUCCESS, TERMINATING, UNDEFINED, IOEXCEPTION
     }
@@ -301,7 +311,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       static class GeneratorRecordReader extends RecordReader<BytesWritable,NullWritable> {
         private long count;
         private long numNodes;
-        private Random rand;
+        private Random64 rand;
 
         @Override
         public void close() throws IOException {
@@ -328,8 +338,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         public void initialize(InputSplit arg0, TaskAttemptContext context)
             throws IOException, InterruptedException {
           numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000);
-          // Use SecureRandom to avoid issue described in HBASE-13382.
-          rand = new SecureRandom();
+          // Use Random64 to avoid issue described in HBASE-21256.
+          rand = new Random64();
         }
 
         @Override
@@ -439,6 +449,36 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         this.numWalkers = context.getConfiguration().getInt(CONCURRENT_WALKER_KEY, CONCURRENT_WALKER_DEFAULT);
         this.walkersStop = false;
         this.conf = context.getConfiguration();
+
+        if (multipleUnevenColumnFamilies) {
+          int n = context.getConfiguration().getInt(BIG_FAMILY_VALUE_SIZE_KEY, 256);
+          int limit = context.getConfiguration().getInt(
+            ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY,
+            ConnectionConfiguration.MAX_KEYVALUE_SIZE_DEFAULT);
+
+          Preconditions.checkArgument(
+            n <= limit,
+            "%s(%s) > %s(%s)",
+            BIG_FAMILY_VALUE_SIZE_KEY, n, ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, limit);
+
+          bigValue = new byte[n];
+          ThreadLocalRandom.current().nextBytes(bigValue);
+          LOG.info("Create a bigValue with " + n + " bytes.");
+        }
+
+        Preconditions.checkArgument(
+          numNodes > 0,
+          "numNodes(%s) <= 0",
+          numNodes);
+        Preconditions.checkArgument(
+          numNodes % width == 0,
+          "numNodes(%s) mod width(%s) != 0",
+          numNodes, width);
+        Preconditions.checkArgument(
+          numNodes % wrap == 0,
+          "numNodes(%s) mod wrap(%s) != 0",
+          numNodes, wrap
+        );
       }
 
       protected void instantiateHTable() throws IOException {
@@ -459,8 +499,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         current[i] = new byte[key.getLength()];
         System.arraycopy(key.getBytes(), 0, current[i], 0, key.getLength());
         if (++i == current.length) {
-          LOG.info("Persisting current.length=" + current.length + ", count=" + count + ", id=" +
-            Bytes.toStringBinary(id) + ", current=" + Bytes.toStringBinary(current[0]) +
+          LOG.debug("Persisting current.length=" + current.length + ", count=" + count +
+            ", id=" + Bytes.toStringBinary(id) + ", current=" + Bytes.toStringBinary(current[0]) +
             ", i=" + i);
           persist(output, count, prev, current, id);
           i = 0;
@@ -528,11 +568,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
           if (this.multipleUnevenColumnFamilies) {
             // Use any column name.
             put.addColumn(TINY_FAMILY_NAME, TINY_FAMILY_NAME, this.tinyValue);
-            // If we've not allocated bigValue, do it now. Reuse same value each time.
-            if (this.bigValue == null) {
-              this.bigValue = new byte[current[i].length * 10];
-              ThreadLocalRandom.current().nextBytes(this.bigValue);
-            }
             // Use any column name.
             put.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME, this.bigValue);
           }
@@ -752,6 +787,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
       FileOutputFormat.setOutputPath(job, tmpOutput);
       job.setOutputFormatClass(SequenceFileOutputFormat.class);
+      TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Random64.class);
 
       boolean success = jobCompletion(job);
 
@@ -783,7 +819,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
       TableMapReduceUtil.addDependencyJars(job);
       TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
-                                                     AbstractHBaseTool.class);
+          AbstractHBaseTool.class);
       TableMapReduceUtil.initCredentials(job);
 
       boolean success = jobCompletion(job);
@@ -1144,13 +1180,14 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
         // TODO check for more than one def, should not happen
         StringBuilder refsSb = null;
-        String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
         if (defCount == 0 || refs.size() != 1) {
+          String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
           refsSb = dumpExtraInfoOnRefs(key, context, refs);
           LOG.error("LinkedListError: key=" + keyString + ", reference(s)=" +
             (refsSb != null? refsSb.toString(): ""));
         }
         if (lostFamilies) {
+          String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
           LOG.error("LinkedListError: key=" + keyString + ", lost big or tiny families");
           context.getCounter(Counts.LOST_FAMILIES).increment(1);
           context.write(key, LOSTFAM);
@@ -1177,6 +1214,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
             // was added which can help a little debugging. This info is only available in mapper
             // output -- the 'Linked List error Key...' log message above. What we emit here is
             // useless for debugging.
+            String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
             context.getCounter("undef", keyString).increment(1);
           }
         } else if (defCount > 0 && refs.size() == 0) {
@@ -1184,6 +1222,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
           context.write(key, UNREF);
           context.getCounter(Counts.UNREFERENCED).increment(1);
           if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
+            String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
             context.getCounter("unref", keyString).increment(1);
           }
         } else {
@@ -1296,7 +1335,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       TableMapReduceUtil.initTableMapperJob(getTableName(getConf()).getName(), scan,
           VerifyMapper.class, BytesWritable.class, BytesWritable.class, job);
       TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
-                                                     AbstractHBaseTool.class);
+          AbstractHBaseTool.class);
 
       job.getConfiguration().setBoolean("mapreduce.map.speculative", false);