Posted to commits@hbase.apache.org by zy...@apache.org on 2018/10/12 18:13:19 UTC

[2/2] hbase git commit: HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

Test fixes by: Taklon Wu <wu...@amazon.com>

Signed-off-by: Zach York <zy...@apache.org>
Signed-off-by: Mingliang Liu <li...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d385ba1a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d385ba1a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d385ba1a

Branch: refs/heads/branch-1
Commit: d385ba1a5dfb17ed40edfb0650d685da66615b43
Parents: b73aab8
Author: Tyler Mi <mt...@amazon.com>
Authored: Tue Sep 4 16:44:59 2018 -0700
Committer: Zach York <zy...@amazon.com>
Committed: Fri Oct 12 11:13:16 2018 -0700

----------------------------------------------------------------------
 .../src/main/resources/hbase-default.xml        |   9 +
 .../snapshot/DisabledTableSnapshotHandler.java  |   5 +-
 .../snapshot/EnabledTableSnapshotHandler.java   |   2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |  21 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  37 +-
 .../master/snapshot/TakeSnapshotHandler.java    |  58 ++-
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |   8 +-
 .../snapshot/SnapshotDescriptionUtils.java      | 138 +++++-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  57 ++-
 .../hbase/snapshot/SnapshotManifestV1.java      |  15 +-
 .../hbase/snapshot/SnapshotManifestV2.java      |  15 +-
 .../TestSnapshotDFSTemporaryDirectory.java      |  73 +++
 .../client/TestSnapshotTemporaryDirectory.java  | 468 +++++++++++++++++++
 ...hotTemporaryDirectoryWithRegionReplicas.java |  31 ++
 .../snapshot/TestSnapshotHFileCleaner.java      |   6 +-
 .../hbase/snapshot/SnapshotTestingUtils.java    |  15 +-
 ...estExportSnapshotWithTemporaryDirectory.java |  54 +++
 .../hbase/snapshot/TestRegionSnapshotTask.java  |   4 +-
 .../snapshot/TestSnapshotDescriptionUtils.java  | 113 ++++-
 20 files changed, 1007 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index b4d095b..78237b1 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1244,6 +1244,15 @@ possible configurations would overwhelm and obscure the important.
       to create a name based on what you are restoring.</description>
   </property>
   <property>
+    <name>hbase.snapshot.working.dir</name>
+    <value></value>
+    <description>Location where the snapshotting process will occur. The location of the
+      completed snapshots will not change, but the temporary directory where the snapshot
+      process occurs will be set to this location. This can be a separate filesystem from
+      the root directory, to improve performance. See HBASE-21098 for more
+      information.</description>
+  </property>
+  <property>
     <name>hbase.server.compactchecker.interval.multiplier</name>
     <value>1000</value>
     <description>The number that determines how often we scan to see if compaction is necessary.

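The property added above lets the in-progress snapshot directory live on a different, faster filesystem (for example HDFS) while completed snapshots stay under the root directory (for example S3). A minimal sketch, assuming the usual <rootDir>/.hbase-snapshot/.tmp default layout and using made-up class and path names, of how the new getWorkingSnapshotDir(rootDir, conf) lookup resolves that directory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class WorkingDirExample {
      // Mirrors SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, conf) from this patch:
      // use hbase.snapshot.working.dir when set, else <rootDir>/.hbase-snapshot/.tmp (assumed default).
      static Path workingSnapshotDir(Path rootDir, Configuration conf) {
        Path defaultDir = new Path(new Path(rootDir, ".hbase-snapshot"), ".tmp");
        return new Path(conf.get("hbase.snapshot.working.dir", defaultDir.toString()));
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        Path rootDir = new Path("s3a://example-bucket/hbase");                  // hypothetical root
        System.out.println(workingSnapshotDir(rootDir, conf));                  // default under rootDir
        conf.set("hbase.snapshot.working.dir", "hdfs://nn:8020/snapshot-tmp");  // hypothetical override
        System.out.println(workingSnapshotDir(rootDir, conf));                  // overridden location
      }
    }

The location of completed snapshots does not change; only the intermediate snapshot writes move to the configured filesystem.
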
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
index 15fa0c5..dc9394e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
@@ -53,9 +53,12 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
   /**
    * @param snapshot descriptor of the snapshot to take
    * @param masterServices master services provider
+   * @throws IOException if it cannot access the filesystem of the snapshot
+   *         temporary directory
    */
   public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
-      final MasterServices masterServices, final SnapshotManager snapshotManager) {
+      final MasterServices masterServices, final SnapshotManager snapshotManager)
+      throws IOException {
     super(snapshot, masterServices, snapshotManager);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index fc58455..f424c38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -50,7 +50,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
   private final ProcedureCoordinator coordinator;
 
   public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
-                                     final SnapshotManager manager) {
+      final SnapshotManager manager) throws IOException {
     super(snapshot, master, manager);
     this.coordinator = manager.getCoordinator();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index bb54fc3..dee5293 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 /**
@@ -77,21 +78,20 @@ public final class MasterSnapshotVerifier {
   private static final Log LOG = LogFactory.getLog(MasterSnapshotVerifier.class);
 
   private SnapshotDescription snapshot;
-  private FileSystem fs;
-  private Path rootDir;
+  private FileSystem workingDirFs;
   private TableName tableName;
   private MasterServices services;
 
   /**
    * @param services services for the master
    * @param snapshot snapshot to check
-   * @param rootDir root directory of the hbase installation.
+   * @param workingDirFs the file system containing the temporary snapshot information
    */
-  public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot, Path rootDir) {
-    this.fs = services.getMasterFileSystem().getFileSystem();
+  public MasterSnapshotVerifier(MasterServices services,
+      SnapshotDescription snapshot, FileSystem workingDirFs) {
+    this.workingDirFs = workingDirFs;
     this.services = services;
     this.snapshot = snapshot;
-    this.rootDir = rootDir;
     this.tableName = TableName.valueOf(snapshot.getTable());
   }
 
@@ -105,7 +105,7 @@ public final class MasterSnapshotVerifier {
    */
   public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
       throws CorruptedSnapshotException, IOException {
-    SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs,
+    SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), workingDirFs,
                                                       snapshotDir, snapshot);
     // verify snapshot info matches
     verifySnapshotDescription(snapshotDir);
@@ -122,7 +122,8 @@ public final class MasterSnapshotVerifier {
    * @param snapshotDir snapshot directory to check
    */
   private void verifySnapshotDescription(Path snapshotDir) throws CorruptedSnapshotException {
-    SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+    SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(workingDirFs,
+        snapshotDir);
     if (!this.snapshot.equals(found)) {
       throw new CorruptedSnapshotException("Snapshot read (" + found
           + ") doesn't equal snapshot we ran (" + snapshot + ").", snapshot);
@@ -195,7 +196,9 @@ public final class MasterSnapshotVerifier {
     }
 
     // Verify Snapshot HFiles
-    SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), fs, manifest);
+    // Requires the root directory file system as HFiles are stored in the root directory
+    SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(),
+        FSUtils.getRootDirFileSystem(services.getConfiguration()), manifest);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index b12debf..bc0aa8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -264,11 +264,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    */
   void resetTempDir() throws IOException {
     // cleanup any existing snapshots.
-    Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
-    if (master.getMasterFileSystem().getFileSystem().exists(tmpdir)) {
-      if (!master.getMasterFileSystem().getFileSystem().delete(tmpdir, true)) {
-        LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
-      }
+    Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+        master.getConfiguration());
+    FileSystem tmpFs = tmpdir.getFileSystem(master.getConfiguration());
+    if (!tmpFs.delete(tmpdir, true)) {
+      LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
     }
   }
 
@@ -419,8 +419,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    */
   private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
       throws HBaseSnapshotException {
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
+        master.getConfiguration());
     TableName snapshotTable =
         TableName.valueOf(snapshot.getTable());
 
@@ -445,12 +445,13 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
 
     try {
+      FileSystem workingDirFS = workingDir.getFileSystem(master.getConfiguration());
       // delete the working directory, since we aren't running the snapshot. Likely leftovers
       // from a failed attempt.
-      fs.delete(workingDir, true);
+      workingDirFS.delete(workingDir, true);
 
       // recreate the working directory for the snapshot
-      if (!fs.mkdirs(workingDir)) {
+      if (!workingDirFS.mkdirs(workingDir)) {
         throw new SnapshotCreationException("Couldn't create working directory (" + workingDir
             + ") for snapshot" , snapshot);
       }
@@ -465,10 +466,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /**
    * Take a snapshot of a disabled table.
    * @param snapshot description of the snapshot to take. Modified to be {@link Type#DISABLED}.
-   * @throws HBaseSnapshotException if the snapshot could not be started
+   * @throws IOException if the snapshot could not be started or filesystem for snapshot
+   *         temporary directory could not be determined
    */
   private synchronized void snapshotDisabledTable(SnapshotDescription snapshot)
-      throws HBaseSnapshotException {
+      throws IOException {
     // setup the snapshot
     prepareToTakeSnapshot(snapshot);
 
@@ -484,10 +486,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /**
    * Take a snapshot of an enabled table.
    * @param snapshot description of the snapshot to take.
-   * @throws HBaseSnapshotException if the snapshot could not be started
+   * @throws IOException if the snapshot could not be started or filesystem for snapshot
+   *         temporary directory could not be determined
    */
   private synchronized void snapshotEnabledTable(SnapshotDescription snapshot)
-      throws HBaseSnapshotException {
+          throws IOException {
     // setup the snapshot
     prepareToTakeSnapshot(snapshot);
 
@@ -506,16 +509,18 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param handler the snapshot handler
    */
   private synchronized void snapshotTable(SnapshotDescription snapshot,
-      final TakeSnapshotHandler handler) throws HBaseSnapshotException {
+      final TakeSnapshotHandler handler) throws IOException {
     try {
       handler.prepare();
       this.executorService.submit(handler);
       this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
     } catch (Exception e) {
       // cleanup the working directory by trying to delete it from the fs.
-      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
+          master.getConfiguration());
+      FileSystem workingDirFs = workingDir.getFileSystem(master.getConfiguration());
       try {
-        if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
+        if (!workingDirFs.delete(workingDir, true)) {
           LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
               ClientSnapshotDescriptionUtils.toString(snapshot));
         }

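The recurring pattern in the SnapshotManager changes above is to resolve a FileSystem from the working directory itself (workingDir.getFileSystem(conf)) instead of reusing the master's root-directory filesystem, so cleanup and mkdirs land on the right filesystem when the two differ. A minimal standalone sketch of that pattern, with a hypothetical local path:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WorkingDirCleanupExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical working directory; it may be on a different filesystem than hbase.rootdir.
        Path workingDir = new Path("file:///tmp/hbase-snapshot-working/.tmp");
        // Ask the Path for its own FileSystem rather than using the root-dir FileSystem.
        FileSystem workingDirFs = workingDir.getFileSystem(conf);
        // Delete leftovers from a failed attempt (delete returns false if nothing was there),
        // then recreate the directory before starting a new snapshot.
        if (!workingDirFs.delete(workingDir, true)) {
          System.out.println("Nothing deleted at " + workingDir);
        }
        if (!workingDirFs.mkdirs(workingDir)) {
          throw new IOException("Couldn't create working directory " + workingDir);
        }
      }
    }
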
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 73b5675..e9d415b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.snapshot;
 
+import com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.HashSet;
@@ -77,7 +78,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
   protected final SnapshotDescription snapshot;
   protected final Configuration conf;
-  protected final FileSystem fs;
+  protected final FileSystem rootFs;
+  protected final FileSystem workingDirFs;
   protected final Path rootDir;
   private final Path snapshotDir;
   protected final Path workingDir;
@@ -95,24 +97,33 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   /**
    * @param snapshot descriptor of the snapshot to take
    * @param masterServices master services provider
+   * @throws IllegalArgumentException if the working snapshot directory set from the
+   *   configuration is the same as the completed snapshot directory
+   * @throws IOException if the file system of the working snapshot directory cannot be
+   *   determined
    */
   public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices,
-                             final SnapshotManager snapshotManager) {
+                             final SnapshotManager snapshotManager) throws IOException {
     super(masterServices, EventType.C_M_SNAPSHOT_TABLE);
     assert snapshot != null : "SnapshotDescription must not be null";
     assert masterServices != null : "MasterServices must not be null";
-
     this.master = masterServices;
+    this.conf = this.master.getConfiguration();
+    this.rootDir = this.master.getMasterFileSystem().getRootDir();
+    this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
+    Preconditions.checkArgument(!SnapshotDescriptionUtils.isSubDirectoryOf(workingDir, rootDir) ||
+            SnapshotDescriptionUtils.isWithinDefaultWorkingDir(workingDir, conf),
+        "The working directory " + workingDir + " cannot be in the root directory unless it is "
+            + "within the default working directory");
+
     this.snapshot = snapshot;
     this.snapshotManager = snapshotManager;
     this.snapshotTable = TableName.valueOf(snapshot.getTable());
-    this.conf = this.master.getConfiguration();
-    this.fs = this.master.getMasterFileSystem().getFileSystem();
-    this.rootDir = this.master.getMasterFileSystem().getRootDir();
+    this.rootFs = this.master.getMasterFileSystem().getFileSystem();
     this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-    this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    this.workingDirFs = this.workingDir.getFileSystem(this.conf);
     this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
-    this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
+    this.snapshotManifest = SnapshotManifest.create(conf, rootFs, workingDir, snapshot, monitor);
 
     this.tableLockManager = master.getTableLockManager();
     this.tableLock = this.tableLockManager.writeLock(
@@ -120,7 +131,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
         EventType.C_M_SNAPSHOT_TABLE.toString());
 
     // prepare the verify
-    this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir);
+    this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, workingDirFs);
     // update the running tasks
     this.status = TaskMonitor.get().createStatus(
       "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
@@ -172,7 +183,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       // an external exception that gets captured here.
 
       // write down the snapshot info in the working directory
-      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
+      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
       snapshotManifest.addTableDescriptor(this.htd);
       monitor.rethrowException();
 
@@ -208,7 +219,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       verifier.verifySnapshot(this.workingDir, serverNames);
 
       // complete the snapshot, atomically moving from tmp to .snapshot dir.
-      completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
+      completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs, this.workingDirFs);
       msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
       status.markComplete(msg);
       LOG.info(msg);
@@ -228,7 +239,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       try {
         // if the working dir is still present, the snapshot has failed.  it is present we delete
         // it.
-        if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
+        if (!workingDirFs.delete(workingDir, true)) {
           LOG.error("Couldn't delete snapshot working directory:" + workingDir);
         }
       } catch (IOException e) {
@@ -250,22 +261,21 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   }
 
   /**
-   * Reset the manager to allow another snapshot to proceed
+   * Reset the manager to allow another snapshot to proceed.
+   * Commits the snapshot process by moving the working snapshot
+   * to the finalized filepath
+   *
+   * @param snapshotDir The file path of the completed snapshots
+   * @param workingDir  The file path of the in progress snapshots
+   * @param fs The file system of the completed snapshots
+   * @param workingDirFs The file system of the in progress snapshots
    *
-   * @param snapshotDir final path of the snapshot
-   * @param workingDir directory where the in progress snapshot was built
-   * @param fs {@link FileSystem} where the snapshot was built
    * @throws SnapshotCreationException if the snapshot could not be moved
    * @throws IOException the filesystem could not be reached
    */
-  public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs)
-      throws SnapshotCreationException, IOException {
-    LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to "
-        + snapshotDir);
-    if (!fs.rename(workingDir, snapshotDir)) {
-      throw new SnapshotCreationException("Failed to move working directory(" + workingDir
-          + ") to completed directory(" + snapshotDir + ").");
-    }
+  public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
+      FileSystem workingDirFs) throws SnapshotCreationException, IOException {
+    SnapshotDescriptionUtils.completeSnapshot(snapshotDir, workingDir, fs, workingDirFs, conf);
     finished = true;
   }
 

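TakeSnapshotHandler now rejects a configured working directory that sits inside the root directory unless it is under the default temporary location, since completing the snapshot renames or copies that directory into the completed snapshot path. A sketch of that guard, reusing the prefix-based checks this patch adds to SnapshotDescriptionUtils (the class name and paths are illustrative, and the <rootDir>/.hbase-snapshot/.tmp default is assumed):

    import org.apache.hadoop.fs.Path;

    public class WorkingDirGuardExample {
      // Same prefix test the patch adds as SnapshotDescriptionUtils.isSubDirectoryOf().
      static boolean isSubDirectoryOf(Path workingDir, Path rootDir) {
        return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
      }

      // Mirrors the Preconditions.checkArgument() added to the TakeSnapshotHandler constructor.
      static void checkWorkingDir(Path workingDir, Path rootDir, Path defaultWorkingDir) {
        boolean insideRoot = isSubDirectoryOf(workingDir, rootDir);
        boolean insideDefault = workingDir.equals(defaultWorkingDir)
            || isSubDirectoryOf(workingDir, defaultWorkingDir);
        if (insideRoot && !insideDefault) {
          throw new IllegalArgumentException("The working directory " + workingDir
              + " cannot be in the root directory unless it is within the default working directory");
        }
      }

      public static void main(String[] args) {
        Path rootDir = new Path("hdfs://nn:8020/hbase");                      // hypothetical
        Path defaultWorkingDir = new Path(rootDir, ".hbase-snapshot/.tmp");   // assumed default layout
        // Allowed: under the default working directory inside the root.
        checkWorkingDir(new Path(defaultWorkingDir, "snap1"), rootDir, defaultWorkingDir);
        // Rejected: inside the root but outside the default working directory.
        try {
          checkWorkingDir(new Path(rootDir, "data/snap-tmp"), rootDir, defaultWorkingDir);
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }
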
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 90a5f92..85f5b73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3955,7 +3955,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public void addRegionToSnapshot(SnapshotDescription desc,
       ForeignExceptionSnare exnSnare) throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
-    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
 
     SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
                                                         snapshotDir, desc, exnSnare);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 0824189..3b9c3a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -922,10 +922,12 @@ public class ExportSnapshot extends Configured implements Tool {
     FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
     LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString());
 
-    boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false);
+    boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) ||
+        conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
 
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
-    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot);
+    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot,
+        destConf);
     Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
     Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
 
@@ -935,7 +937,7 @@ public class ExportSnapshot extends Configured implements Tool {
       if (skipTmp) {
         needSetOwnerDir = outputSnapshotDir;
       } else {
-        needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot);
+        needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
         if (outputFs.exists(needSetOwnerDir)) {
           needSetOwnerDir = snapshotTmpDir;
         }

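For ExportSnapshot, a configured hbase.snapshot.working.dir now also forces the export to skip the temporary staging directory, since that directory may not live on the destination filesystem. A tiny sketch of the decision; the "snapshot.export.skip.tmp" key name standing in for CONF_SKIP_TMP is an assumption, while the working-dir key is the one this patch defines:

    import org.apache.hadoop.conf.Configuration;

    public class SkipTmpDecisionExample {
      // Mirrors the patched decision in ExportSnapshot: skip the tmp staging step either when
      // explicitly requested or when a custom snapshot working directory has been configured.
      static boolean skipTmp(Configuration conf) {
        return conf.getBoolean("snapshot.export.skip.tmp", false)   // key name assumed for CONF_SKIP_TMP
            || conf.get("hbase.snapshot.working.dir") != null;
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(skipTmp(conf));                                       // false by default
        conf.set("hbase.snapshot.working.dir", "hdfs://nn:8020/snapshot-tmp");   // hypothetical
        System.out.println(skipTmp(conf));                                       // true
      }
    }
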
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cdc1b1f..722bb23 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hbase.snapshot;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
 
@@ -28,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
@@ -112,6 +115,12 @@ public final class SnapshotDescriptionUtils {
   /** Temporary directory under the snapshot directory to store in-progress snapshots */
   public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
 
+  /**
+   * The configuration property that determines the filepath of the snapshot
+   * base working directory
+   */
+  public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
+
  /** This tag will be created in in-progress snapshots */
   public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
   // snapshot operation values
@@ -191,46 +200,52 @@ public final class SnapshotDescriptionUtils {
    * @return the final directory for the completed snapshot
    */
   public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
-    return getCompletedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
+    return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
   }
 
   /**
    * Get the general working directory for snapshots - where they are built, where they are
    * temporarily copied on export, etc.
    * @param rootDir root directory of the HBase installation
+   * @param conf Configuration of the HBase instance
    * @return Path to the snapshot tmp directory, relative to the passed root directory
    */
-  public static Path getWorkingSnapshotDir(final Path rootDir) {
-    return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
+  public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) {
+    return new Path(conf.get(SNAPSHOT_WORKING_DIR,
+        getDefaultWorkingSnapshotDir(rootDir).toString()));
   }
 
   /**
    * Get the directory to build a snapshot, before it is finalized
    * @param snapshot snapshot that will be built
    * @param rootDir root directory of the hbase installation
+   * @param conf Configuration of the HBase instance
    * @return {@link Path} where one can build a snapshot
    */
-  public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir) {
-    return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshot.getName());
+  public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir,
+      Configuration conf) {
+    return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf);
   }
 
   /**
    * Get the directory to build a snapshot, before it is finalized
    * @param snapshotName name of the snapshot
    * @param rootDir root directory of the hbase installation
+   * @param conf Configuration of the HBase instance
    * @return {@link Path} where one can build a snapshot
    */
-  public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir) {
-    return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshotName);
+  public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
+      Configuration conf) {
+    return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
   }
 
   /**
-   * Get the directory to store the snapshot instance
-   * @param snapshotsDir hbase-global directory for storing all snapshots
+   * Get the directory within the given filepath to store the snapshot instance
+   * @param snapshotsDir directory to store snapshot directory within
    * @param snapshotName name of the snapshot to take
-   * @return the final directory for the completed snapshot
+   * @return the final directory for the snapshot in the given filepath
    */
-  private static final Path getCompletedSnapshotDir(final Path snapshotsDir, String snapshotName) {
+  private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
     return new Path(snapshotsDir, snapshotName);
   }
 
@@ -243,6 +258,40 @@ public final class SnapshotDescriptionUtils {
   }
 
   /**
+   * Determines if the given workingDir is a subdirectory of the given "root directory"
+   * @param workingDir a directory to check
+   * @param rootDir root directory of the HBase installation
+   * @return true if the given workingDir is a subdirectory of the given root directory,
+   *   false otherwise
+   */
+  public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) {
+    return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
+  }
+
+  /**
+   * Determines if the given workingDir is a subdirectory of the default working snapshot directory
+   * @param workingDir a directory to check
+   * @param conf configuration for the HBase cluster
+   * @return true if the given workingDir is a subdirectory of the default working directory for
+   *   snapshots, false otherwise
+   */
+  public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf)
+      throws IOException {
+    Path defaultWorkingDir = getDefaultWorkingSnapshotDir(FSUtils.getRootDir(conf));
+    return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
+  }
+
+  /**
+   * Get the default working directory for snapshots - where they are built, where they are
+   * temporarily copied on export, etc.
+   * @param rootDir root directory of the HBase installation
+   * @return Path to the default snapshot tmp directory, relative to the passed root directory
+   */
+  private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
+    return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
+  }
+
+  /**
    * Convert the passed snapshot description into a 'full' snapshot description based on default
    * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
    * supplied to their default values.
@@ -346,25 +395,70 @@ public final class SnapshotDescriptionUtils {
   /**
    * Move the finished snapshot to its final, publicly visible directory - this marks the snapshot
    * as 'complete'.
-   * @param snapshot description of the snapshot being tabken
-   * @param rootdir root directory of the hbase installation
-   * @param workingDir directory where the in progress snapshot was built
-   * @param fs {@link FileSystem} where the snapshot was built
+   * @param snapshotDir The file path of the completed snapshots
+   * @param workingDir The file path of the in progress snapshots
+   * @param fs {@link FileSystem} The file system of the completed snapshots
+   * @param workingDirFs The file system of the in progress snapshots
    * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if the
    * snapshot could not be moved
    * @throws IOException the filesystem could not be reached
    */
-  public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
-      FileSystem fs) throws SnapshotCreationException, IOException {
-    Path finishedDir = getCompletedSnapshotDir(snapshot, rootdir);
+  public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
+      FileSystem workingDirFs, Configuration conf) throws SnapshotCreationException, IOException {
     LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to "
-        + finishedDir);
-    if (!fs.rename(workingDir, finishedDir)) {
-      throw new SnapshotCreationException("Failed to move working directory(" + workingDir
-          + ") to completed directory(" + finishedDir + ").", snapshot);
+        + snapshotDir);
+    if (!workingDirFs.exists(workingDir)) {
+      throw new IOException("Failed to move nonexistent snapshot working directory "
+          + workingDir + " to " + snapshotDir);
+    }
+    // If the working and completed snapshot directory are on the same file system, attempt
+    // to rename the working snapshot directory to the completed location. If that fails,
+    // or the file systems differ, attempt to copy the directory over, throwing an exception
+    // if this fails
+    URI workingURI = workingDirFs.getUri();
+    URI rootURI = fs.getUri();
+
+    if ((shouldSkipRenameSnapshotDirectories(workingURI, rootURI)
+        || !fs.rename(workingDir, snapshotDir))
+         && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) {
+      throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
+          + ") to completed directory(" + snapshotDir + ").");
     }
   }
 
+  @VisibleForTesting
+  static boolean shouldSkipRenameSnapshotDirectories(URI workingURI, URI rootURI) {
+    // check scheme, e.g. file, hdfs
+    if (workingURI.getScheme() == null &&
+        (rootURI.getScheme() != null && !rootURI.getScheme().equalsIgnoreCase("file"))) {
+      return true;
+    }
+    if (workingURI.getScheme() != null && !workingURI.getScheme().equals(rootURI.getScheme())) {
+      return true;
+    }
+
+    // check Authority, e.g. localhost:port
+    if (workingURI.getAuthority() == null && rootURI.getAuthority() != null) {
+      return true;
+    }
+    if (workingURI.getAuthority() != null &&
+        !workingURI.getAuthority().equals(rootURI.getAuthority())) {
+      return true;
+    }
+
+    // check UGI/userInfo
+    if (workingURI.getUserInfo() == null && rootURI.getUserInfo() != null) {
+      return true;
+    }
+    if (workingURI.getUserInfo() != null &&
+        !workingURI.getUserInfo().equals(rootURI.getUserInfo())) {
+      return true;
+    }
+
+    return false;
+  }
+
+
   /**
    * Check if the user is this table snapshot's owner
    * @param snapshot the table snapshot description

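The heart of the change is the new completeSnapshot()/shouldSkipRenameSnapshotDirectories() pair above: when the working and completed directories resolve to the same filesystem (same scheme, authority, and user info), the snapshot is committed with a cheap rename; otherwise, or if the rename fails, it falls back to FileUtil.copy across filesystems. A standalone sketch of the URI comparison with made-up URIs; the authority and user-info branches are collapsed into Objects.equals(), which is equivalent to the patch's null-aware checks:

    import java.net.URI;
    import java.util.Objects;

    public class RenameVsCopyExample {
      // Rename is only attempted when both URIs describe the same filesystem.
      // A null working scheme is treated as compatible only with a null or "file" root scheme.
      static boolean shouldSkipRename(URI workingURI, URI rootURI) {
        if (workingURI.getScheme() == null
            && rootURI.getScheme() != null && !rootURI.getScheme().equalsIgnoreCase("file")) {
          return true;
        }
        if (workingURI.getScheme() != null && !workingURI.getScheme().equals(rootURI.getScheme())) {
          return true;
        }
        if (!Objects.equals(workingURI.getAuthority(), rootURI.getAuthority())) {
          return true;
        }
        return !Objects.equals(workingURI.getUserInfo(), rootURI.getUserInfo());
      }

      public static void main(String[] args) {
        URI hdfsTmp = URI.create("hdfs://nn:8020/hbase-snapshot-tmp");   // hypothetical
        URI s3Root  = URI.create("s3a://example-bucket/hbase");          // hypothetical
        System.out.println(shouldSkipRename(hdfsTmp, hdfsTmp));  // false: same fs, rename is enough
        System.out.println(shouldSkipRename(hdfsTmp, s3Root));   // true: different fs, copy instead
      }
    }

Checking the URIs up front avoids issuing a cross-filesystem rename that is guaranteed to fail before falling back to the copy.
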
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 7822fa4..8a73773 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -79,18 +79,29 @@ public final class SnapshotManifest {
   private final ForeignExceptionSnare monitor;
   private final Configuration conf;
   private final Path workingDir;
-  private final FileSystem fs;
+  private final FileSystem rootFs;
+  private final FileSystem workingDirFs;
   private int manifestSizeLimit;
 
-  private SnapshotManifest(final Configuration conf, final FileSystem fs,
+  /**
+   *
+   * @param conf configuration file for HBase setup
+   * @param rootFs root filesystem containing HFiles
+   * @param workingDir file path of where the manifest should be located
+   * @param desc description of snapshot being taken
+   * @param monitor monitor of foreign exceptions
+   * @throws IOException if the working directory file system cannot be
+   *                     determined from the config file
+   */
+  private SnapshotManifest(final Configuration conf, final FileSystem rootFs,
       final Path workingDir, final SnapshotDescription desc,
-      final ForeignExceptionSnare monitor) {
+      final ForeignExceptionSnare monitor) throws IOException {
     this.monitor = monitor;
     this.desc = desc;
     this.workingDir = workingDir;
     this.conf = conf;
-    this.fs = fs;
-
+    this.rootFs = rootFs;
+    this.workingDirFs = this.workingDir.getFileSystem(this.conf);
     this.manifestSizeLimit = conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
   }
 
@@ -109,7 +120,7 @@ public final class SnapshotManifest {
    */
   public static SnapshotManifest create(final Configuration conf, final FileSystem fs,
       final Path workingDir, final SnapshotDescription desc,
-      final ForeignExceptionSnare monitor) {
+      final ForeignExceptionSnare monitor) throws IOException {
     return new SnapshotManifest(conf, fs, workingDir, desc, monitor);
 
   }
@@ -152,9 +163,9 @@ public final class SnapshotManifest {
   private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION:
-        return new SnapshotManifestV1.ManifestBuilder(conf, fs, workingDir);
+        return new SnapshotManifestV1.ManifestBuilder(conf, rootFs, workingDir);
       case SnapshotManifestV2.DESCRIPTOR_VERSION:
-        return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir);
+        return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir);
       default:
         throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc);
     }
@@ -223,7 +234,7 @@ public final class SnapshotManifest {
       throws IOException {
 
     // Open the RegionFS
-    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
+    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
         tableDir, regionInfo, true);
     monitor.rethrowException();
 
@@ -283,11 +294,11 @@ public final class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
-            SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+            SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc);
         } finally {
           tpool.shutdown();
         }
@@ -304,9 +315,10 @@ public final class SnapshotManifest {
           List<SnapshotRegionManifest> v1Regions, v2Regions;
           ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
           try {
-            v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
-            v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
-                manifestSizeLimit);
+            v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs,
+                workingDir, desc);
+            v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs,
+                workingDir, desc, manifestSizeLimit);
           } catch (InvalidProtocolBufferException e) {
             throw new CorruptedSnapshotException("unable to parse region manifest " +
                 e.getMessage(), e);
@@ -380,7 +392,7 @@ public final class SnapshotManifest {
       Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(conf, fs, rootDir)
+      new FSTableDescriptors(conf, workingDirFs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, htd, false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
@@ -397,9 +409,10 @@ public final class SnapshotManifest {
     List<SnapshotRegionManifest> v1Regions, v2Regions;
     ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
     try {
-      v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
-      v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
-          manifestSizeLimit);
+      v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs,
+          workingDir, desc);
+      v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs,
+          workingDir, desc, manifestSizeLimit);
     } finally {
       tpool.shutdown();
     }
@@ -429,12 +442,12 @@ public final class SnapshotManifest {
     // them we will get the same information.
     if (v1Regions != null && v1Regions.size() > 0) {
       for (SnapshotRegionManifest regionManifest: v1Regions) {
-        SnapshotManifestV1.deleteRegionManifest(fs, workingDir, regionManifest);
+        SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
       }
     }
     if (v2Regions != null && v2Regions.size() > 0) {
       for (SnapshotRegionManifest regionManifest: v2Regions) {
-        SnapshotManifestV2.deleteRegionManifest(fs, workingDir, regionManifest);
+        SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
       }
     }
   }
@@ -444,7 +457,7 @@ public final class SnapshotManifest {
    */
   private void writeDataManifest(final SnapshotDataManifest manifest)
       throws IOException {
-    FSDataOutputStream stream = fs.create(new Path(workingDir, DATA_MANIFEST_NAME));
+    FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME));
     try {
       manifest.writeTo(stream);
     } finally {
@@ -458,7 +471,7 @@ public final class SnapshotManifest {
   private SnapshotDataManifest readDataManifest() throws IOException {
     FSDataInputStream in = null;
     try {
-      in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME));
+      in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME));
       CodedInputStream cin = CodedInputStream.newInstance(in);
       cin.setSizeLimit(manifestSizeLimit);
       return SnapshotDataManifest.parseFrom(cin);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index a5afb91..74aaa4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -66,17 +66,20 @@ public final class SnapshotManifestV1 {
                                                           HRegionFileSystem, Path> {
     private final Configuration conf;
     private final Path snapshotDir;
-    private final FileSystem fs;
+    private final FileSystem rootFs;
+    private final FileSystem workingDirFs;
 
-    public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+    public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
+        final Path snapshotDir) throws IOException {
       this.snapshotDir = snapshotDir;
       this.conf = conf;
-      this.fs = fs;
+      this.rootFs = rootFs;
+      this.workingDirFs = snapshotDir.getFileSystem(conf);
     }
 
     public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
       HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        fs, snapshotDir, regionInfo);
+        workingDirFs, snapshotDir, regionInfo);
       return snapshotRegionFs;
     }
 
@@ -97,13 +100,13 @@ public final class SnapshotManifestV1 {
       boolean success = true;
       if (storeFile.isReference()) {
         // write the Reference object to the snapshot
-        storeFile.getReference().write(fs, referenceFile);
+        storeFile.getReference().write(workingDirFs, referenceFile);
       } else {
         // create "reference" to this store file.  It is intentionally an empty file -- all
         // necessary information is captured by its fs location and filename.  This allows us to
         // only figure out what needs to be done via a single nn operation (instead of having to
         // open and read the files as well).
-        success = fs.createNewFile(referenceFile);
+        success = workingDirFs.createNewFile(referenceFile);
       }
       if (!success) {
         throw new IOException("Failed to create reference file:" + referenceFile);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
index 3b918a0..433c003 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
@@ -68,12 +68,13 @@ public final class SnapshotManifestV2 {
                     SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> {
     private final Configuration conf;
     private final Path snapshotDir;
-    private final FileSystem fs;
+    private final FileSystem rootFs;
 
-    public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+    public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
+        final Path snapshotDir) {
       this.snapshotDir = snapshotDir;
       this.conf = conf;
-      this.fs = fs;
+      this.rootFs = rootFs;
     }
 
     public SnapshotRegionManifest.Builder regionOpen(final HRegionInfo regionInfo) {
@@ -85,9 +86,11 @@ public final class SnapshotManifestV2 {
     public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException {
       // we should ensure the snapshot dir exist, maybe it has been deleted by master
       // see HBASE-16464
-      if (fs.exists(snapshotDir)) {
+      FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf);
+      if (workingDirFs.exists(snapshotDir)) {
         SnapshotRegionManifest manifest = region.build();
-        FSDataOutputStream stream = fs.create(getRegionManifestPath(snapshotDir, manifest));
+        FSDataOutputStream stream = workingDirFs.create(
+            getRegionManifestPath(snapshotDir, manifest));
         try {
           manifest.writeTo(stream);
         } finally {
@@ -120,7 +123,7 @@ public final class SnapshotManifestV2 {
       if (storeFile.isReference()) {
         sfManifest.setReference(storeFile.getReference().convert());
       }
-      sfManifest.setFileSize(storeFile.getReferencedFileStatus(fs).getLen());
+      sfManifest.setFileSize(storeFile.getReferencedFileStatus(rootFs).getLen());
       family.addStoreFiles(sfManifest.build());
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
new file mode 100644
index 0000000..5ac2f0f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+/**
+ * This class tests that the use of a temporary snapshot directory supports snapshot functionality
+ * while the temporary directory is on the same file system as the root directory
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ */
+@Category(LargeTests.class)
+public class TestSnapshotDFSTemporaryDirectory
+    extends TestSnapshotTemporaryDirectory {
+
+  /**
+   * Setup the config for the cluster
+   *
+   * @throws Exception on failure
+   */
+  @BeforeClass public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+    admin = UTIL.getHBaseAdmin();
+  }
+
+  private static void setupConf(Configuration conf) throws IOException {
+    // disable the ui
+    conf.setInt("hbase.regionsever.info.port", -1);
+    // change the flush size to a small amount, regulating number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // so make sure we get a compaction when doing a load, but keep around some
+    // files in the store
+    conf.setInt("hbase.hstore.compaction.min", 10);
+    conf.setInt("hbase.hstore.compactionThreshold", 10);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+        ConstantSizeRegionSplitPolicy.class.getName());
+
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, UTIL.getDefaultRootDirPath().toString()
+        + Path.SEPARATOR + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir"
+        + Path.SEPARATOR);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
new file mode 100644
index 0000000..cbf02f2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.UUID;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * This class verifies that snapshot functionality still works when the temporary snapshot
+ * working directory is configured on a different file system than the root directory
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ */
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestSnapshotTemporaryDirectory {
+
+  @Parameterized.Parameters public static Iterable<Integer> data() {
+    return Arrays
+        .asList(SnapshotManifestV1.DESCRIPTOR_VERSION, SnapshotManifestV2.DESCRIPTOR_VERSION);
+  }
+
+  @Parameterized.Parameter public int manifestVersion;
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotTemporaryDirectory.class);
+  protected static final int NUM_RS = 2;
+  protected static String TEMP_DIR =
+      Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR + UUID.randomUUID().toString();
+
+  protected static Admin admin;
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  protected static final String STRING_TABLE_NAME = "test";
+  protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
+
+  /**
+   * Setup the config for the cluster
+   *
+   * @throws Exception on failure
+   */
+  @BeforeClass public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+    admin = UTIL.getHBaseAdmin();
+  }
+
+  private static void setupConf(Configuration conf) {
+    // disable the ui
+    conf.setInt("hbase.regionserver.info.port", -1);
+    // change the flush size to a small amount, regulating number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // so make sure we get a compaction when doing a load, but keep around some
+    // files in the store
+    conf.setInt("hbase.hstore.compaction.min", 10);
+    conf.setInt("hbase.hstore.compactionThreshold", 10);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+        ConstantSizeRegionSplitPolicy.class.getName());
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
+  }
+
+  @Before public void setup() throws Exception {
+    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
+    htd.setRegionReplication(getNumReplicas());
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+  }
+
+  protected int getNumReplicas() {
+    return 1;
+  }
+
+  @After public void tearDown() throws Exception {
+    UTIL.deleteTable(TABLE_NAME);
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+  }
+
+  @AfterClass public static void cleanupTest() {
+    try {
+      UTIL.shutdownMiniCluster();
+      FileUtils.deleteDirectory(new File(TEMP_DIR));
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Test(timeout = 180000) public void testRestoreDisabledSnapshot()
+      throws IOException, InterruptedException {
+    long tid = System.currentTimeMillis();
+    TableName tableName = TableName.valueOf("testtb-" + tid);
+    byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+    byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+    int snapshot0Rows;
+    int snapshot1Rows;
+
+    // create Table and disable it
+    SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
+    admin.disableTable(tableName);
+
+    // take an empty snapshot
+    takeSnapshot(tableName, Bytes.toString(emptySnapshot), true);
+
+    // enable table and insert data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot0Rows = UTIL.countRows(table);
+    }
+    admin.disableTable(tableName);
+
+    // take a snapshot
+    takeSnapshot(tableName, Bytes.toString(snapshotName0), true);
+
+    // enable table and insert more data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot1Rows = UTIL.countRows(table);
+    }
+
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    admin.disableTable(tableName);
+    takeSnapshot(tableName, Bytes.toString(snapshotName1), true);
+
+    // Restore from snapshot-0
+    admin.restoreSnapshot(snapshotName0);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from emptySnapshot
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(emptySnapshot);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Delete the table, then restore again from snapshot-1
+    UTIL.deleteTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+  }
+
+  @Test(timeout = 180000) public void testRestoreEnabledSnapshot()
+      throws IOException, InterruptedException {
+    long tid = System.currentTimeMillis();
+    TableName tableName = TableName.valueOf("testtb-" + tid);
+    byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+    byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+    int snapshot0Rows;
+    int snapshot1Rows;
+
+    // create Table
+    SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
+
+    // take an empty snapshot
+    takeSnapshot(tableName, Bytes.toString(emptySnapshot), false);
+
+    // Insert data
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot0Rows = UTIL.countRows(table);
+    }
+
+    // take a snapshot
+    takeSnapshot(tableName, Bytes.toString(snapshotName0), false);
+
+    // Insert more data
+    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      snapshot1Rows = UTIL.countRows(table);
+    }
+
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    takeSnapshot(tableName, Bytes.toString(snapshotName1), false);
+
+    // Restore from snapshot-0
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName0);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from emptySnapshot
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(emptySnapshot);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Restore from snapshot-1
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+    // Delete the table, then restore again from snapshot-1
+    UTIL.deleteTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+  }
+
+  /**
+   * Test snapshotting a table that is offline
+   *
+   * @throws Exception if snapshot does not complete successfully
+   */
+  @Test(timeout = 300000) public void testOfflineTableSnapshot() throws Exception {
+    Admin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // put some stuff in the table
+    Table table = UTIL.getConnection().getTable(TABLE_NAME);
+    UTIL.loadTable(table, TEST_FAM, false);
+
+    LOG.debug("FS state before disable:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+    // XXX if this is flaky, might want to consider using the async version and looping as
+    // disableTable can succeed and still timeout.
+    admin.disableTable(TABLE_NAME);
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    // take a snapshot of the disabled table
+    final String SNAPSHOT_NAME = "offlineTableSnapshot";
+    byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
+    takeSnapshot(TABLE_NAME, SNAPSHOT_NAME, true);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots =
+        SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
+
+    // make sure it's a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    SnapshotTestingUtils
+        .confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir, admin, fs);
+
+    admin.deleteSnapshot(snapshot);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  /**
+   * Tests that a snapshot has the correct contents by taking a snapshot, cloning it, and then
+   * verifying that the contents of the original and cloned tables match
+   *
+   * @throws Exception if snapshot does not complete successfully
+   */
+  @Test(timeout = 180000) public void testSnapshotCloneContents() throws Exception {
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // put some stuff in the table
+    Table table = UTIL.getConnection().getTable(TABLE_NAME);
+    UTIL.loadTable(table, TEST_FAM);
+    table.close();
+
+    String snapshot1 = "TableSnapshot1";
+    takeSnapshot(TABLE_NAME, snapshot1, false);
+    LOG.debug("Snapshot1 completed.");
+
+    TableName clone = TableName.valueOf("Table1Clone");
+    admin.cloneSnapshot(snapshot1, clone, false);
+
+    Scan original = new Scan();
+    Scan cloned = new Scan();
+    ResultScanner originalScan = admin.getConnection().getTable(TABLE_NAME).getScanner(original);
+    ResultScanner clonedScan =
+        admin.getConnection().getTable(TableName.valueOf("Table1Clone")).getScanner(cloned);
+
+    Iterator<Result> i = originalScan.iterator();
+    Iterator<Result> i2 = clonedScan.iterator();
+    assertTrue(i.hasNext());
+    while (i.hasNext()) {
+      assertTrue(i2.hasNext());
+      assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})),
+          Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {})));
+    }
+    assertFalse(i2.hasNext());
+    admin.deleteSnapshot(snapshot1);
+    UTIL.deleteTable(clone);
+    admin.close();
+  }
+
+  @Test(timeout = 180000) public void testOfflineTableSnapshotWithEmptyRegion() throws Exception {
+    // test with an empty table with one region
+
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    LOG.debug("FS state before disable:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+    admin.disableTable(TABLE_NAME);
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    // take a snapshot of the disabled table
+    byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegion");
+    takeSnapshot(TABLE_NAME, Bytes.toString(snapshot), true);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots =
+        SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
+
+    // make sure it's a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils
+        .logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
+            LOG);
+
+    List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // TEST_FAM has no files in the empty region
+    List<byte[]> nonEmptyCfs = Lists.newArrayList();
+    SnapshotTestingUtils
+        .confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs, rootDir,
+            admin, fs);
+
+    admin.deleteSnapshot(snapshot);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  // Ensures that the snapshot is transferred to the proper completed snapshot directory
+  @Test(timeout = 180000) public void testEnsureTemporaryDirectoryTransfer() throws Exception {
+    Admin admin = null;
+    TableName tableName2 = TableName.valueOf("testListTableSnapshots");
+    try {
+      admin = UTIL.getHBaseAdmin();
+
+      HTableDescriptor htd = new HTableDescriptor(tableName2);
+      UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+
+      String table1Snapshot1 = "Table1Snapshot1";
+      takeSnapshot(TABLE_NAME, table1Snapshot1, false);
+      LOG.debug("Snapshot1 completed.");
+
+      String table1Snapshot2 = "Table1Snapshot2";
+      takeSnapshot(TABLE_NAME, table1Snapshot2, false);
+      LOG.debug("Snapshot2 completed.");
+
+      String table2Snapshot1 = "Table2Snapshot1";
+      takeSnapshot(TABLE_NAME, table2Snapshot1, false);
+      LOG.debug("Table2Snapshot1 completed.");
+
+      List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", ".*");
+      List<String> listTableSnapshotNames = new ArrayList<String>();
+      assertEquals(3, listTableSnapshots.size());
+      for (SnapshotDescription s : listTableSnapshots) {
+        listTableSnapshotNames.add(s.getName());
+      }
+      assertTrue(listTableSnapshotNames.contains(table1Snapshot1));
+      assertTrue(listTableSnapshotNames.contains(table1Snapshot2));
+      assertTrue(listTableSnapshotNames.contains(table2Snapshot1));
+    } finally {
+      if (admin != null) {
+        try {
+          admin.deleteSnapshots("Table.*");
+        } catch (SnapshotDoesNotExistException ignore) {
+        }
+        if (admin.tableExists(tableName2)) {
+          UTIL.deleteTable(tableName2);
+        }
+        admin.close();
+      }
+    }
+  }
+
+  private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled)
+      throws IOException {
+    Type type = disabled ? Type.DISABLED : Type.FLUSH;
+    SnapshotDescription desc = SnapshotDescription.newBuilder()
+        .setTable(tableName.getNameAsString())
+        .setName(snapshotName)
+        .setVersion(manifestVersion)
+        .setType(type)
+        .build();
+    admin.snapshot(desc);
+  }
+}
\ No newline at end of file
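
For illustration only (not part of the patch): with the hbase.snapshot.working.dir setting used in this test, the snapshot working directory can resolve to a location on a different FileSystem than the root directory. A minimal sketch, reusing UTIL and TEMP_DIR from the test above and the getWorkingSnapshotDir(rootDir, conf) overload introduced by this change (it would run inside a test method, so checked exceptions are omitted):

    // Point the snapshot working dir at a local file:// path while rootDir stays on the test DFS.
    Configuration conf = UTIL.getConfiguration();
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
    // Resolve the working directory from the configuration ...
    Path rootDir = FSUtils.getRootDir(conf);
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, conf);
    // ... and obtain its FileSystem, which here is the local fs rather than rootDir's fs.
    FileSystem workingFs = workingDir.getFileSystem(conf);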

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
new file mode 100644
index 0000000..c720b8e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestSnapshotTemporaryDirectoryWithRegionReplicas
+    extends TestSnapshotTemporaryDirectory {
+
+  @Override
+  protected int getNumReplicas() {
+    return 3;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
index 7d618c3..f509771 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
@@ -136,7 +136,8 @@ public class TestSnapshotHFileCleaner {
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+          TEST_UTIL.getConfiguration()), true);
     }
   }
 
@@ -163,7 +164,8 @@ public class TestSnapshotHFileCleaner {
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+          TEST_UTIL.getConfiguration()), true);
     }
   }
 

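For illustration only (not part of the patch): the same cleanup can be written so that the FileSystem is resolved from the working directory itself, since the working directory may live on a different filesystem than rootDir; workingFs below is an illustrative local name:

    // Delete the in-progress snapshot directory on whichever filesystem it resides.
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
        TEST_UTIL.getConfiguration());
    FileSystem workingFs = workingDir.getFileSystem(TEST_UTIL.getConfiguration());
    workingFs.delete(workingDir, true);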
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 9847bc4..6820019 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -499,8 +499,8 @@ public final class SnapshotTestingUtils {
         this.htd = htd;
         this.desc = desc;
         this.tableRegions = tableRegions;
-        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
-        new FSTableDescriptors(conf)
+        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
+        new FSTableDescriptors(conf, snapshotDir.getFileSystem(conf), rootDir)
           .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
       }
 
@@ -624,8 +624,11 @@ public final class SnapshotTestingUtils {
         SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
         manifest.addTableDescriptor(htd);
         manifest.consolidate();
-        SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir, fs);
-        snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+        Path workingDir = snapshotDir;
+        Path completedSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+        SnapshotDescriptionUtils.completeSnapshot(completedSnapshotDir, workingDir, fs,
+            workingDir.getFileSystem(conf), conf);
+        snapshotDir = completedSnapshotDir;
         return snapshotDir;
       }
 
@@ -679,8 +682,8 @@ public final class SnapshotTestingUtils {
         .setVersion(version)
         .build();
 
-      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
-      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
+      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, workingDir.getFileSystem(conf));
       return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
     }
 

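For illustration only (not part of the patch): the rough shape of the updated SnapshotDescriptionUtils flow as used by the call sites above, with rootFs and workingFs as illustrative local names for the root-directory and working-directory filesystems:

    // Build the snapshot under the (possibly separate) working directory ...
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
    FileSystem workingFs = workingDir.getFileSystem(conf);
    SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, workingFs);
    // ... then publish it into the completed snapshot directory under rootDir.
    Path completedSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
    SnapshotDescriptionUtils.completeSnapshot(completedSnapshotDir, workingDir, rootFs, workingFs,
        conf);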
http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
new file mode 100644
index 0000000..c8e0266
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.util.UUID;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class})
+public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot {
+
+  protected static String TEMP_DIR = Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR
+      + UUID.randomUUID().toString();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    setUpBaseConf(TEST_UTIL.getConfiguration());
+    TEST_UTIL.startMiniCluster(3);
+    TEST_UTIL.startMiniMapReduceCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TestExportSnapshot.tearDownAfterClass();
+    FileUtils.deleteDirectory(new File(TEMP_DIR));
+  }
+
+  public static void setUpBaseConf(Configuration conf) {
+    TestExportSnapshot.setUpBaseConf(conf);
+    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/d385ba1a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
index 403b1e6..bd361ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
@@ -119,7 +119,7 @@ public class TestRegionSnapshotTask {
 
     final HRegion region = spy(hRegions.get(0));
 
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     final SnapshotManifest manifest =
         SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
     manifest.addTableDescriptor(table.getTableDescriptor());
@@ -164,7 +164,7 @@ public class TestRegionSnapshotTask {
   private void addRegionToSnapshot(HBaseProtos.SnapshotDescription snapshot,
       HRegion region, SnapshotManifest manifest) throws Exception {
     LOG.info("Adding region to snapshot: " + region.getRegionInfo().getRegionNameAsString());
-    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     SnapshotManifest.RegionVisitor visitor = createRegionVisitorWithDelay(snapshot, workingDir);
     manifest.addRegion(region, visitor);
     LOG.info("Added the region to snapshot: " + region.getRegionInfo().getRegionNameAsString());