Posted to commits@geode.apache.org by kl...@apache.org on 2017/11/10 18:04:44 UTC

[geode] branch feature/GEODE-3940 updated: WIP

This is an automated email from the ASF dual-hosted git repository.

klund pushed a commit to branch feature/GEODE-3940
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/feature/GEODE-3940 by this push:
     new f380c1d  WIP
f380c1d is described below

commit f380c1dab69b0960a5461ea2bf63b0f2b8787340
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Fri Nov 10 10:02:57 2017 -0800

    WIP
    
    * BackupDistributedTest is now failing
    * Unimplemented PrepareBackupRequestTest tests fail
    * FlushToDisk and FinishBackupRequest need to be changed to match
    PrepareBackupRequest
---
 .../admin/internal/AdminDistributedSystemImpl.java |   2 +-
 .../geode/admin/internal/FlushToDiskResponse.java  |  43 -
 .../geode/admin/internal/PrepareBackupRequest.java | 154 ----
 .../internal/DistributionMessageObserver.java      |  16 +-
 .../org/apache/geode/internal/DSFIDFactory.java    |  12 +-
 .../org/apache/geode/internal/SystemAdmin.java     |   2 +-
 .../internal/admin/remote/CliLegacyMessage.java    |   1 -
 .../geode/internal/cache/DirectoryHolder.java      |   2 +-
 .../geode/internal/cache/DiskStoreBackup.java      |   2 -
 .../geode/internal/cache/DiskStoreFactoryImpl.java |   1 +
 .../apache/geode/internal/cache/DiskStoreImpl.java |  10 +-
 .../geode/internal/cache/GemFireCacheImpl.java     |   3 +-
 .../apache/geode/internal/cache/InternalCache.java |   1 +
 .../org/apache/geode/internal/cache/Oplog.java     |  22 +-
 .../internal/cache/PartitionedRegionDataStore.java |   1 +
 .../cache/backup}/BackupDataStoreHelper.java       |  28 +-
 .../cache/backup}/BackupDataStoreResult.java       |  24 +-
 .../{persistence => backup}/BackupInspector.java   |  24 +-
 .../internal/cache/{ => backup}/BackupLock.java    |  28 +-
 .../internal/cache/{ => backup}/BackupManager.java |  39 +-
 .../cache/backup/BackupResultCollector.java        |  26 +
 .../internal/cache/{ => backup}/BackupUtil.java    |  26 +-
 .../cache/backup/FinishBackupReplyProcessor.java   |  72 ++
 .../cache/backup}/FinishBackupRequest.java         |  70 +-
 .../cache/backup}/FinishBackupResponse.java        |  24 +-
 .../cache/backup/FlushToDiskProcessor.java         |  40 +
 .../cache/backup}/FlushToDiskRequest.java          |  43 +-
 .../internal/cache/backup/FlushToDiskResponse.java |  45 +
 .../geode/internal/cache/backup/PrepareBackup.java |  46 +
 .../cache/backup/PrepareBackupFactory.java         |  44 +
 .../cache/backup/PrepareBackupOperation.java       |  96 ++
 .../cache/backup/PrepareBackupReplyProcessor.java  |  48 +
 .../cache/backup/PrepareBackupRequest.java         |  73 ++
 .../cache/backup}/PrepareBackupResponse.java       |  40 +-
 .../{persistence => backup}/RestoreScript.java     |  24 +-
 .../{persistence => backup}/ScriptGenerator.java   |  24 +-
 .../UnixBackupInspector.java                       |  24 +-
 .../UnixScriptGenerator.java                       |  27 +-
 .../WindowsBackupInspector.java                    |  24 +-
 .../WindowsScriptGenerator.java                    |  25 +-
 .../internal/cache/xmlcache/CacheCreation.java     |   2 +-
 .../internal/beans/DistributedSystemBridge.java    |   4 +-
 .../internal/beans/MemberMBeanBridge.java          |   2 +-
 .../cli/commands/BackupDiskStoreCommand.java       |   2 +-
 .../admin/internal/PrepareBackupRequestTest.java   | 207 -----
 .../BackupDistributedTest.java}                    | 642 +++++++-------
 .../BackupInspectorIntegrationTest.java            |  37 +-
 .../BackupIntegrationTest.java}                    |  86 +-
 .../cache/{ => backup}/BackupLockTest.java         |  36 +-
 .../BackupPrepareAndFinishMsgDUnitTest.java        |  73 +-
 .../cache/backup}/FinishBackupRequestTest.java     |  51 +-
 .../cache/backup}/FlushToDiskRequestTest.java      |  44 +-
 .../IncrementalBackupDistributedTest.java}         |  72 +-
 ...titionedBackupPrepareAndFinishMsgDUnitTest.java |  29 +
 .../cache/backup/PrepareBackupFactoryTest.java     |  81 ++
 .../cache/backup/PrepareBackupOperationTest.java   | 177 ++++
 .../backup/PrepareBackupReplyProcessorTest.java    |  91 ++
 .../cache/backup/PrepareBackupRequestTest.java     |  98 +++
 ...eplicateBackupPrepareAndFinishMsgDUnitTest.java |  29 +
 .../UnixScriptGeneratorTest.java                   |  37 +-
 .../WindowsScriptGeneratorTest.java                |  37 +-
 .../cache/partitioned/PersistPRKRFDUnitTest.java   |  36 +-
 ...sistentColocatedPartitionedRegionDUnitTest.java | 226 ++---
 .../PersistentPartitionedRegionDUnitTest.java      | 121 +--
 .../PersistentPartitionedRegionTestBase.java       | 975 ++++++++-------------
 ...tPartitionedRegionWithTransactionDUnitTest.java |  28 +-
 ...titionedBackupPrepareAndFinishMsgDUnitTest.java |  28 -
 .../PersistentRecoveryOrderDUnitTest.java          |   2 +-
 ...eplicateBackupPrepareAndFinishMsgDUnitTest.java |  28 -
 .../beans/DistributedSystemBridgeJUnitTest.java    |   8 +-
 .../pdx/ClientsWithVersioningRetryDUnitTest.java   |   4 +-
 .../BackupIntegrationTest.cache.xml}               |   0
 72 files changed, 2434 insertions(+), 2115 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
index 303fda8..8e23a31 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
@@ -38,7 +38,7 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.Banner;
 import org.apache.geode.internal.admin.*;
 import org.apache.geode.internal.admin.remote.*;
-import org.apache.geode.internal.cache.BackupUtil;
+import org.apache.geode.internal.cache.backup.BackupUtil;
 import org.apache.geode.internal.cache.persistence.PersistentMemberPattern;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.InternalLogWriter;
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskResponse.java b/geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskResponse.java
deleted file mode 100644
index a85a6aa..0000000
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskResponse.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.admin.internal;
-
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.admin.remote.AdminResponse;
-
-/**
- * The response to the {@link FlushToDiskRequest}
- *
- *
- */
-public class FlushToDiskResponse extends AdminResponse {
-
-  public FlushToDiskResponse() {
-    super();
-  }
-
-  public FlushToDiskResponse(InternalDistributedMember sender) {
-    this.setRecipient(sender);
-  }
-
-  public int getDSFID() {
-    return FLUSH_TO_DISK_RESPONSE;
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getName();
-  }
-}
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupRequest.java b/geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupRequest.java
deleted file mode 100644
index 3679ba8..0000000
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupRequest.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.admin.internal;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.logging.log4j.Logger;
-
-import org.apache.geode.CancelException;
-import org.apache.geode.cache.persistence.PersistentID;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.distributed.internal.DistributionMessage;
-import org.apache.geode.distributed.internal.ReplyException;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.admin.remote.AdminFailureResponse;
-import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
-import org.apache.geode.internal.admin.remote.AdminResponse;
-import org.apache.geode.internal.admin.remote.CliLegacyMessage;
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.i18n.LocalizedStrings;
-import org.apache.geode.internal.logging.LogService;
-import org.apache.geode.internal.logging.log4j.LocalizedMessage;
-
-/**
- * A request to from an admin VM to all non admin members to start a backup. In the prepare phase of
- * the backup, the members will suspend bucket destroys to make sure buckets aren't missed during
- * the backup.
- */
-public class PrepareBackupRequest extends CliLegacyMessage {
-  private static final Logger logger = LogService.getLogger();
-
-  private final DM dm;
-  private final PrepareBackupReplyProcessor replyProcessor;
-
-  public PrepareBackupRequest() {
-    super();
-    this.dm = null;
-    this.replyProcessor = null;
-  }
-
-  private PrepareBackupRequest(DM dm, Set<InternalDistributedMember> recipients) {
-    this(dm, recipients, new PrepareBackupReplyProcessor(dm, recipients));
-  }
-
-  PrepareBackupRequest(DM dm, Set<InternalDistributedMember> recipients,
-      PrepareBackupReplyProcessor replyProcessor) {
-    this.dm = dm;
-    setRecipients(recipients);
-    this.replyProcessor = replyProcessor;
-    this.msgId = this.replyProcessor.getProcessorId();
-  }
-
-  public static Map<DistributedMember, Set<PersistentID>> send(DM dm, Set recipients) {
-    PrepareBackupRequest request = new PrepareBackupRequest(dm, recipients);
-    return request.send();
-  }
-
-  Map<DistributedMember, Set<PersistentID>> send() {
-    dm.putOutgoing(this);
-
-    AdminResponse response = createResponse(dm);
-
-    try {
-      replyProcessor.waitForReplies();
-    } catch (ReplyException e) {
-      if (!(e.getCause() instanceof CancelException)) {
-        throw e;
-      }
-    } catch (InterruptedException e) {
-      logger.warn(e.getMessage(), e);
-    }
-
-    response.setSender(dm.getDistributionManagerId());
-    replyProcessor.process(response, false);
-    return replyProcessor.getResults();
-  }
-
-  @Override
-  protected AdminResponse createResponse(DM dm) {
-    HashSet<PersistentID> persistentIds;
-    try {
-      persistentIds = prepareForBackup(dm);
-    } catch (IOException e) {
-      logger.error(LocalizedMessage.create(LocalizedStrings.CliLegacyMessage_ERROR, getClass()), e);
-      return AdminFailureResponse.create(getSender(), e);
-    }
-    return new PrepareBackupResponse(getSender(), persistentIds);
-  }
-
-  HashSet<PersistentID> prepareForBackup(DM dm) throws IOException {
-    InternalCache cache = dm.getCache();
-    HashSet<PersistentID> persistentIds;
-    if (cache == null) {
-      persistentIds = new HashSet<>();
-    } else {
-      persistentIds = cache.startBackup(getSender()).prepareForBackup();
-    }
-    return persistentIds;
-  }
-
-  @Override
-  public int getDSFID() {
-    return PREPARE_BACKUP_REQUEST;
-  }
-
-  static class PrepareBackupReplyProcessor extends AdminMultipleReplyProcessor {
-
-    private final Map<DistributedMember, Set<PersistentID>> results =
-        Collections.synchronizedMap(new HashMap<DistributedMember, Set<PersistentID>>());
-
-    PrepareBackupReplyProcessor(DM dm, Collection initMembers) {
-      super(dm, initMembers);
-    }
-
-    @Override
-    protected boolean stopBecauseOfExceptions() {
-      return false;
-    }
-
-    @Override
-    protected void process(DistributionMessage message, boolean warn) {
-      if (message instanceof PrepareBackupResponse) {
-        HashSet<PersistentID> persistentIds = ((PrepareBackupResponse) message).getPersistentIds();
-        if (persistentIds != null && !persistentIds.isEmpty()) {
-          results.put(message.getSender(), persistentIds);
-        }
-      }
-      super.process(message, warn);
-    }
-
-    Map<DistributedMember, Set<PersistentID>> getResults() {
-      return results;
-    }
-  }
-}
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessageObserver.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessageObserver.java
index e918e12..72e4811 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessageObserver.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessageObserver.java
@@ -14,11 +14,8 @@
  */
 package org.apache.geode.distributed.internal;
 
-
 /**
  * This class is a test hook to intercept DistributionMessages in the VM receiving the message.
- *
- *
  */
 public abstract class DistributionMessageObserver {
 
@@ -27,7 +24,6 @@ public abstract class DistributionMessageObserver {
   /**
    * Set the instance of the observer. Setting to null will clear the observer.
    *
-   * @param instance
    * @return the old observer, or null if there was no old observer.
    */
   public static DistributionMessageObserver setInstance(DistributionMessageObserver instance) {
@@ -47,7 +43,7 @@ public abstract class DistributionMessageObserver {
    * @param message The message itself
    */
   public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
-
+    // override as needed
   }
 
   /**
@@ -57,16 +53,16 @@ public abstract class DistributionMessageObserver {
    * @param message The message itself
    */
   public void afterProcessMessage(DistributionManager dm, DistributionMessage message) {
-
+    // override as needed
   }
 
   /**
    * Called just before a message is distributed.
    *
-   * @param dm the distribution manager that's sending the messsage
-   * @param msg the message itself
+   * @param dm the distribution manager that's sending the message
+   * @param message the message itself
    */
-  public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {
-
+  public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
+    // override as needed
   }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index f8c8d80..cdab0e2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -24,12 +24,12 @@ import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
 
 import org.apache.geode.DataSerializer;
 import org.apache.geode.InternalGemFireError;
-import org.apache.geode.admin.internal.FinishBackupRequest;
-import org.apache.geode.admin.internal.FinishBackupResponse;
-import org.apache.geode.admin.internal.FlushToDiskRequest;
-import org.apache.geode.admin.internal.FlushToDiskResponse;
-import org.apache.geode.admin.internal.PrepareBackupRequest;
-import org.apache.geode.admin.internal.PrepareBackupResponse;
+import org.apache.geode.internal.cache.backup.FinishBackupRequest;
+import org.apache.geode.internal.cache.backup.FinishBackupResponse;
+import org.apache.geode.internal.cache.backup.FlushToDiskRequest;
+import org.apache.geode.internal.cache.backup.FlushToDiskResponse;
+import org.apache.geode.internal.cache.backup.PrepareBackupRequest;
+import org.apache.geode.internal.cache.backup.PrepareBackupResponse;
 import org.apache.geode.admin.internal.SystemMemberCacheEventProcessor;
 import org.apache.geode.admin.jmx.internal.StatAlertNotification;
 import org.apache.geode.cache.InterestResultPolicy;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java b/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
index 2f1eda9..792891a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
@@ -75,7 +75,7 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.distributed.internal.tcpserver.TcpClient;
 import org.apache.geode.internal.admin.remote.TailLogResponse;
-import org.apache.geode.internal.cache.BackupUtil;
+import org.apache.geode.internal.cache.backup.BackupUtil;
 import org.apache.geode.internal.cache.DiskStoreImpl;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.DateFormatter;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/CliLegacyMessage.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/CliLegacyMessage.java
index bf8ff7f..47bf5ff 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/CliLegacyMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/CliLegacyMessage.java
@@ -51,5 +51,4 @@ public abstract class CliLegacyMessage extends AdminRequest {
           LocalizedStrings.AdminRequest_RESPONSE_TO__0__WAS_CANCELLED, this.getClass().getName()));
     }
   }
-
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DirectoryHolder.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DirectoryHolder.java
index 16b3baf..335aa1a 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DirectoryHolder.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DirectoryHolder.java
@@ -91,7 +91,7 @@ public class DirectoryHolder {
     return dir;
   }
 
-  int getArrayIndex() {
+  public int getArrayIndex() {
     return this.index;
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
index 9b005ff..b4428e8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
@@ -19,8 +19,6 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.geode.internal.cache.persistence.BackupInspector;
-
 /**
  * This class manages the state of the backup of an individual disk store. It holds the list of
  * oplogs that still need to be backed up, along with the lists of oplog files that should be
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
index d09e70e..ce52220 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
@@ -21,6 +21,7 @@ import org.apache.geode.GemFireIOException;
 import org.apache.geode.cache.DiskStore;
 import org.apache.geode.cache.DiskStoreFactory;
 import org.apache.geode.distributed.internal.ResourceEvent;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.geode.internal.cache.xmlcache.CacheCreation;
 import org.apache.geode.internal.cache.xmlcache.CacheXml;
 import org.apache.geode.internal.cache.xmlcache.DiskStoreAttributesCreation;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index cc66ca7..7f3793a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -59,6 +59,8 @@ import java.util.regex.Pattern;
 
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
+import org.apache.geode.internal.cache.backup.BackupLock;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelCriterion;
@@ -2027,7 +2029,7 @@ public class DiskStoreImpl implements DiskStore {
     return this.directories[this.infoFileDirIndex];
   }
 
-  int getInforFileDirIndex() {
+  public int getInforFileDirIndex() {
     return this.infoFileDirIndex;
   }
 
@@ -2694,7 +2696,7 @@ public class DiskStoreImpl implements DiskStore {
   /**
    * Get all of the oplogs
    */
-  Oplog[] getAllOplogsForBackup() {
+  public Oplog[] getAllOplogsForBackup() {
     return persistentOplogs.getAllOplogs();
   }
 
@@ -2939,7 +2941,7 @@ public class DiskStoreImpl implements DiskStore {
   public static class KillCompactorException extends RuntimeException {
   }
 
-  DiskInitFile getDiskInitFile() {
+  public DiskInitFile getDiskInitFile() {
     return this.initFile;
   }
 
@@ -4325,7 +4327,7 @@ public class DiskStoreImpl implements DiskStore {
    * Wait for any current operations in the delayed write pool. Completion of this method ensures
    * that the writes have completed or the pool was shutdown
    */
-  protected void waitForDelayedWrites() {
+  public void waitForDelayedWrites() {
     Future<?> lastWriteTask = lastDelayedWrite;
     if (lastWriteTask != null) {
       try {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index e8aac76..a2888b9 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -77,6 +77,7 @@ import javax.transaction.TransactionManager;
 import com.sun.jna.Native;
 import com.sun.jna.Platform;
 import org.apache.commons.lang.StringUtils;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelCriterion;
@@ -134,9 +135,7 @@ import org.apache.geode.cache.query.QueryService;
 import org.apache.geode.cache.query.internal.DefaultQuery;
 import org.apache.geode.cache.query.internal.DefaultQueryService;
 import org.apache.geode.cache.query.internal.InternalQueryService;
-import org.apache.geode.cache.query.internal.MethodInvocationAuthorizer;
 import org.apache.geode.cache.query.internal.QueryMonitor;
-import org.apache.geode.cache.query.internal.RestrictedMethodInvocationAuthorizer;
 import org.apache.geode.cache.query.internal.cq.CqService;
 import org.apache.geode.cache.query.internal.cq.CqServiceProvider;
 import org.apache.geode.cache.server.CacheServer;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
index 58ab77d..9ec1fd0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
@@ -52,6 +52,7 @@ import org.apache.geode.distributed.internal.DistributionAdvisor;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.SystemTimer;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
 import org.apache.geode.internal.cache.control.ResourceAdvisor;
 import org.apache.geode.internal.cache.event.EventTrackerExpiryTask;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
index 9c3e52b..231d8bf 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
@@ -27,10 +27,8 @@ import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.io.SyncFailedException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
-import java.nio.file.Files;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -55,7 +53,7 @@ import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
 import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
 import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
 import it.unimi.dsi.fastutil.objects.ObjectIterator;
-import org.apache.commons.io.FileUtils;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelException;
@@ -688,6 +686,10 @@ public class Oplog implements CompactableOplog, Flushable {
     }
   }
 
+  public Object getLock() {
+    return lock;
+  }
+
   public void replaceIncompatibleEntry(DiskRegionView dr, DiskEntry old, DiskEntry repl) {
     boolean useNextOplog = false;
     // No need to get the backup lock prior to synchronizing (correct lock order) since the
@@ -1173,11 +1175,11 @@ public class Oplog implements CompactableOplog, Flushable {
     }
   }
 
-  File getCrfFile() {
+  public File getCrfFile() {
     return this.crf.f;
   }
 
-  File getDrfFile() {
+  public File getDrfFile() {
     return this.drf.f;
   }
 
@@ -1187,7 +1189,7 @@ public class Oplog implements CompactableOplog, Flushable {
    *
    * @param oplogFileNames a Set of operation log file names.
    */
-  Set<String> gatherMatchingOplogFiles(Set<String> oplogFileNames) {
+  public Set<String> gatherMatchingOplogFiles(Set<String> oplogFileNames) {
     Set<String> matchingFiles = new HashSet<>();
 
     // CRF match?
@@ -1221,7 +1223,7 @@ public class Oplog implements CompactableOplog, Flushable {
    * @return a map of baslineline oplog files to copy. May be empty if total current set for this
    *         oplog does not match the baseline.
    */
-  Map<File, File> mapBaseline(Collection<File> baselineOplogFiles) {
+  public Map<File, File> mapBaseline(Collection<File> baselineOplogFiles) {
     // Map of baseline oplog file name to oplog file
     Map<String, File> baselineOplogMap =
         TransformUtils.transformAndMap(baselineOplogFiles, TransformUtils.fileNameTransformer);
@@ -4233,7 +4235,7 @@ public class Oplog implements CompactableOplog, Flushable {
     }
   }
 
-  File getKrfFile() {
+  public File getKrfFile() {
     return new File(this.diskFile.getPath() + KRF_FILE_EXT);
   }
 
@@ -5822,7 +5824,7 @@ public class Oplog implements CompactableOplog, Flushable {
     return this.crf.channel;
   }
 
-  DirectoryHolder getDirectoryHolder() {
+  public DirectoryHolder getDirectoryHolder() {
     return this.dirHolder;
   }
 
@@ -7751,7 +7753,7 @@ public class Oplog implements CompactableOplog, Flushable {
     }
   }
 
-  void finishKrf() {
+  public void finishKrf() {
     createKrf(false);
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
index a4cf98a..43008f6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
@@ -31,6 +31,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.InternalGemFireError;
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreHelper.java
similarity index 69%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreHelper.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreHelper.java
index 551aaa1..364bca6 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreHelper.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.File;
 import java.util.Map;
@@ -34,14 +36,14 @@ public class BackupDataStoreHelper {
 
   @SuppressWarnings("rawtypes")
   public static BackupDataStoreResult backupAllMembers(DM dm, Set recipients, File targetDir,
-      File baselineDir) {
+                                                       File baselineDir) {
     FlushToDiskRequest.send(dm, recipients);
 
     boolean abort = true;
     Map<DistributedMember, Set<PersistentID>> successfulMembers;
     Map<DistributedMember, Set<PersistentID>> existingDataStores;
     try {
-      existingDataStores = PrepareBackupRequest.send(dm, recipients);
+      existingDataStores = new PrepareBackupOperation(dm, dm.getId(), dm.getCache(), recipients).send();
       abort = false;
     } finally {
       successfulMembers = FinishBackupRequest.send(dm, recipients, targetDir, baselineDir, abort);
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreResult.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreResult.java
similarity index 63%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreResult.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreResult.java
index eae674b..d838cc9 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/BackupDataStoreResult.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDataStoreResult.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import java.util.Map;
 import java.util.Set;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/BackupInspector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupInspector.java
similarity index 86%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/BackupInspector.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupInspector.java
index 8c707f8..4ca582e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/BackupInspector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupInspector.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedReader;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupLock.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupLock.java
similarity index 78%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/BackupLock.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupLock.java
index 23f075d..5392a36 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupLock.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupLock.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Condition;
@@ -47,7 +49,7 @@ public class BackupLock extends ReentrantLock {
     hook.set(testHook);
   }
 
-  void lockForBackup() {
+  public void lockForBackup() {
     super.lock();
     isBackingUp = true;
     super.unlock();
@@ -57,7 +59,7 @@ public class BackupLock extends ReentrantLock {
     isBackupThread.set(true);
   }
 
-  void unlockForBackup() {
+  public void unlockForBackup() {
     super.lock();
     isBackingUp = false;
     isBackupThread.remove();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupManager.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
similarity index 94%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/BackupManager.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
index a35bb8b..6134cac 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupManager.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -29,6 +31,12 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.geode.internal.cache.DirectoryHolder;
+import org.apache.geode.internal.cache.DiskStoreBackup;
+import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.Oplog;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.InternalGemFireError;
@@ -42,8 +50,6 @@ import org.apache.geode.distributed.internal.membership.InternalDistributedMembe
 import org.apache.geode.internal.ClassPathLoader;
 import org.apache.geode.internal.DeployedJar;
 import org.apache.geode.internal.JarDeployer;
-import org.apache.geode.internal.cache.persistence.BackupInspector;
-import org.apache.geode.internal.cache.persistence.RestoreScript;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
 
@@ -51,7 +57,7 @@ import org.apache.geode.internal.logging.LogService;
  * This class manages the state an logic to backup a single cache.
  */
 public class BackupManager implements MembershipListener {
-  private static final Logger logger = LogService.getLogger(BackupManager.class);
+  private static final Logger logger = LogService.getLogger();
 
   static final String INCOMPLETE_BACKUP_FILE = "INCOMPLETE_BACKUP_FILE";
 
@@ -311,7 +317,7 @@ public class BackupManager implements MembershipListener {
         }
 
         // Get an appropriate lock object for each set of oplogs.
-        Object childLock = childOplog.lock;
+        Object childLock = childOplog.getLock();
 
         // TODO - We really should move this lock into the disk store, but
         // until then we need to do this magic to make sure we're actually
@@ -581,13 +587,14 @@ public class BackupManager implements MembershipListener {
   }
 
   private void backupFile(File targetDir, File file) throws IOException {
-    if (file != null && file.exists())
+    if (file != null && file.exists()) {
       try {
         Files.createLink(targetDir.toPath().resolve(file.getName()), file.toPath());
       } catch (IOException | UnsupportedOperationException e) {
         logger.warn("Unable to create hard link for + {}. Reverting to file copy", targetDir);
         FileUtils.copyFileToDirectory(file, targetDir);
       }
+    }
   }
 
   private String cleanSpecialCharacters(String string) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupResultCollector.java
new file mode 100644
index 0000000..050d98e
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupResultCollector.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+
+import java.util.Set;
+
+interface BackupResultCollector {
+  void addToResults(InternalDistributedMember member, Set<PersistentID> persistentIds);
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupUtil.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupUtil.java
similarity index 75%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/BackupUtil.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupUtil.java
index e9ae635..8897411 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BackupUtil.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupUtil.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.File;
 import java.text.SimpleDateFormat;
@@ -20,8 +22,6 @@ import java.util.Date;
 import java.util.Set;
 
 import org.apache.geode.admin.internal.AdminDistributedSystemImpl;
-import org.apache.geode.admin.internal.BackupDataStoreHelper;
-import org.apache.geode.admin.internal.BackupDataStoreResult;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.internal.i18n.LocalizedStrings;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupReplyProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupReplyProcessor.java
new file mode 100644
index 0000000..0afc45f
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupReplyProcessor.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+class FinishBackupReplyProcessor extends AdminMultipleReplyProcessor {
+
+  private final Map<DistributedMember, Set<PersistentID>> results =
+      Collections.synchronizedMap(new HashMap<DistributedMember, Set<PersistentID>>());
+
+  FinishBackupReplyProcessor(DM dm, Collection initMembers) {
+    super(dm, initMembers);
+  }
+
+  @Override
+  protected boolean stopBecauseOfExceptions() {
+    return false;
+  }
+
+  @Override
+  protected int getAckWaitThreshold() {
+    // Disable the 15 second warning if the backup is taking a long time
+    return 0;
+  }
+
+  @Override
+  public long getAckSevereAlertThresholdMS() {
+    // Don't log severe alerts for backups either
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  protected void process(DistributionMessage message, boolean warn) {
+    if (message instanceof FinishBackupResponse) {
+      HashSet<PersistentID> persistentIds = ((FinishBackupResponse) message).getPersistentIds();
+      if (persistentIds != null && !persistentIds.isEmpty()) {
+        results.put(message.getSender(), persistentIds);
+      }
+    }
+    super.process(message, warn);
+  }
+
+  Map<DistributedMember, Set<PersistentID>> getResults() {
+    return results;
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupRequest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupRequest.java
similarity index 68%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupRequest.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupRequest.java
index ec19fbb..a277478 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupRequest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupRequest.java
@@ -1,26 +1,25 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.File;
 import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
@@ -32,11 +31,9 @@ import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.admin.remote.AdminFailureResponse;
-import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
 import org.apache.geode.internal.admin.remote.AdminResponse;
 import org.apache.geode.internal.admin.remote.CliLegacyMessage;
 import org.apache.geode.internal.cache.InternalCache;
@@ -153,45 +150,4 @@ public class FinishBackupRequest extends CliLegacyMessage {
     DataSerializer.writeBoolean(abort, out);
   }
 
-  static class FinishBackupReplyProcessor extends AdminMultipleReplyProcessor {
-
-    private final Map<DistributedMember, Set<PersistentID>> results =
-        Collections.synchronizedMap(new HashMap<DistributedMember, Set<PersistentID>>());
-
-    FinishBackupReplyProcessor(DM dm, Collection initMembers) {
-      super(dm, initMembers);
-    }
-
-    @Override
-    protected boolean stopBecauseOfExceptions() {
-      return false;
-    }
-
-    @Override
-    protected int getAckWaitThreshold() {
-      // Disable the 15 second warning if the backup is taking a long time
-      return 0;
-    }
-
-    @Override
-    public long getAckSevereAlertThresholdMS() {
-      // Don't log severe alerts for backups either
-      return Long.MAX_VALUE;
-    }
-
-    @Override
-    protected void process(DistributionMessage message, boolean warn) {
-      if (message instanceof FinishBackupResponse) {
-        HashSet<PersistentID> persistentIds = ((FinishBackupResponse) message).getPersistentIds();
-        if (persistentIds != null && !persistentIds.isEmpty()) {
-          results.put(message.getSender(), persistentIds);
-        }
-      }
-      super.process(message, warn);
-    }
-
-    Map<DistributedMember, Set<PersistentID>> getResults() {
-      return results;
-    }
-  }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupResponse.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupResponse.java
similarity index 70%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupResponse.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupResponse.java
index 9887327..aa793e9 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/FinishBackupResponse.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FinishBackupResponse.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.DataInput;
 import java.io.DataOutput;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskProcessor.java
new file mode 100644
index 0000000..999f9af
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskProcessor.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.util.Collection;
+
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
+
+class FlushToDiskProcessor extends AdminMultipleReplyProcessor {
+
+  FlushToDiskProcessor(DM dm, Collection initMembers) {
+    super(dm, initMembers);
+  }
+
+  @Override
+  protected boolean stopBecauseOfExceptions() {
+    return false;
+  }
+
+  @Override
+  protected void process(DistributionMessage message, boolean warn) {
+    super.process(message, warn);
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskRequest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskRequest.java
similarity index 67%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskRequest.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskRequest.java
index 1f6090a..501e8d1 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/FlushToDiskRequest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskRequest.java
@@ -1,20 +1,21 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
-import java.util.Collection;
 import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
@@ -22,10 +23,8 @@ import org.apache.logging.log4j.Logger;
 import org.apache.geode.CancelException;
 import org.apache.geode.cache.DiskStore;
 import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
 import org.apache.geode.internal.admin.remote.AdminResponse;
 import org.apache.geode.internal.admin.remote.CliLegacyMessage;
 import org.apache.geode.internal.cache.InternalCache;
@@ -99,20 +98,4 @@ public class FlushToDiskRequest extends CliLegacyMessage {
     return FLUSH_TO_DISK_REQUEST;
   }
 
-  static class FlushToDiskProcessor extends AdminMultipleReplyProcessor {
-
-    FlushToDiskProcessor(DM dm, Collection initMembers) {
-      super(dm, initMembers);
-    }
-
-    @Override
-    protected boolean stopBecauseOfExceptions() {
-      return false;
-    }
-
-    @Override
-    protected void process(DistributionMessage message, boolean warn) {
-      super.process(message, warn);
-    }
-  }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskResponse.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskResponse.java
new file mode 100644
index 0000000..4195b5b
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskResponse.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.admin.remote.AdminResponse;
+
+/**
+ * The response to the {@link FlushToDiskRequest}
+ *
+ *
+ */
+public class FlushToDiskResponse extends AdminResponse {
+
+  public FlushToDiskResponse() {
+    super();
+  }
+
+  public FlushToDiskResponse(InternalDistributedMember sender) {
+    this.setRecipient(sender);
+  }
+
+  public int getDSFID() {
+    return FLUSH_TO_DISK_RESPONSE;
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getName();
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
new file mode 100644
index 0000000..41ec91e
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.io.IOException;
+import java.util.HashSet;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+
+class PrepareBackup {
+
+  private final InternalDistributedMember member;
+  private final InternalCache cache;
+
+  PrepareBackup(InternalDistributedMember member, InternalCache cache) {
+    this.member = member;
+    this.cache = cache;
+  }
+
+  HashSet<PersistentID> run() throws IOException {
+    HashSet<PersistentID> persistentIds;
+    if (cache == null) {
+      persistentIds = new HashSet<>();
+    } else {
+      persistentIds = cache.startBackup(member).prepareForBackup();
+    }
+    return persistentIds;
+  }
+
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
new file mode 100644
index 0000000..7d45471
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+
+class PrepareBackupFactory {
+
+  PrepareBackupReplyProcessor createReplyProcessor(BackupResultCollector resultCollector, DM dm, Set<InternalDistributedMember> recipients) {
+    return new PrepareBackupReplyProcessor(resultCollector, dm, recipients);
+  }
+
+  PrepareBackupRequest createRequest(InternalDistributedMember sender, Set<InternalDistributedMember> recipients, int processorId, PrepareBackupFactory prepareBackupFactory) {
+    return new PrepareBackupRequest(sender, recipients, processorId, prepareBackupFactory);
+  }
+
+  PrepareBackup createPrepareBackup(InternalDistributedMember member, InternalCache cache) {
+    return new PrepareBackup(member, cache);
+  }
+
+  PrepareBackupResponse createPrepareBackupResponse(InternalDistributedMember sender, HashSet<PersistentID> persistentIds) {
+    return new PrepareBackupResponse(sender, persistentIds);
+  }
+}
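
The factory above is primarily a seam for unit testing: PrepareBackupOperation and PrepareBackupRequest take a PrepareBackupFactory so tests can substitute stubs for their collaborators. A minimal sketch of such a test follows; it is not part of this patch, assumes Mockito/JUnit with the usual static imports (as in the removed PrepareBackupRequestTest), and would live in the same package so the package-private types are visible:

    @Test
    public void sendPutsOutgoingRequestAndWaitsForReplies() throws Exception {
      DM dm = mock(DM.class);
      InternalCache cache = mock(InternalCache.class);
      InternalDistributedMember member = mock(InternalDistributedMember.class);
      Set<InternalDistributedMember> recipients = new HashSet<>();

      PrepareBackupFactory factory = mock(PrepareBackupFactory.class);
      PrepareBackupReplyProcessor replyProcessor = mock(PrepareBackupReplyProcessor.class);
      PrepareBackupRequest request = mock(PrepareBackupRequest.class);
      PrepareBackup prepareBackup = mock(PrepareBackup.class);

      when(factory.createReplyProcessor(any(), eq(dm), eq(recipients))).thenReturn(replyProcessor);
      when(factory.createRequest(eq(member), eq(recipients), anyInt(), eq(factory))).thenReturn(request);
      when(factory.createPrepareBackup(member, cache)).thenReturn(prepareBackup);
      when(prepareBackup.run()).thenReturn(new HashSet<>());

      new PrepareBackupOperation(dm, member, cache, recipients, factory).send();

      verify(dm).putOutgoing(request);          // the request went out to the recipients
      verify(replyProcessor).waitForReplies();  // and the operation waited for their replies
    }
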
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupOperation.java
new file mode 100644
index 0000000..463cef3
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupOperation.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.geode.CancelException;
+import org.apache.geode.annotations.TestingOnly;
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.logging.LogService;
+
+class PrepareBackupOperation implements BackupResultCollector {
+  private static final Logger logger = LogService.getLogger();
+
+  private final DM dm;
+  private final InternalDistributedMember member;
+  private final InternalCache cache;
+  private final Set<InternalDistributedMember> recipients;
+  private final PrepareBackupFactory prepareBackupFactory;
+
+  private final Map<DistributedMember, Set<PersistentID>> results = Collections.synchronizedMap(new HashMap<DistributedMember, Set<PersistentID>>());
+
+  PrepareBackupOperation(DM dm, InternalDistributedMember member, InternalCache cache, Set<InternalDistributedMember> recipients) {
+    this(dm, member, cache, recipients, new PrepareBackupFactory());
+  }
+
+  @TestingOnly
+  PrepareBackupOperation(DM dm, InternalDistributedMember member, InternalCache cache, Set<InternalDistributedMember> recipients, PrepareBackupFactory prepareBackupFactory) {
+    this.dm = dm;
+    this.member = member;
+    this.cache = cache;
+    this.recipients = recipients;
+    this.prepareBackupFactory = prepareBackupFactory;
+  }
+
+  Map<DistributedMember, Set<PersistentID>> send() {
+    PrepareBackupReplyProcessor replyProcessor = prepareBackupFactory.createReplyProcessor(this, dm, recipients);
+    PrepareBackupRequest request = prepareBackupFactory.createRequest(member, recipients, replyProcessor.getProcessorId(), prepareBackupFactory);
+
+    dm.putOutgoing(request);
+
+    processLocally();
+
+    try {
+      replyProcessor.waitForReplies();
+    } catch (ReplyException e) {
+      if (!(e.getCause() instanceof CancelException)) {
+        throw e;
+      }
+    } catch (InterruptedException e) {
+      logger.warn(e.getMessage(), e);
+    }
+
+    return results;
+  }
+
+  @Override
+  public void addToResults(InternalDistributedMember member, Set<PersistentID> persistentIds) {
+    if (persistentIds != null && !persistentIds.isEmpty()) {
+      results.put(member, persistentIds);
+    }
+  }
+
+  private void processLocally() {
+    try {
+      addToResults(member, prepareBackupFactory.createPrepareBackup(member, cache).run());
+    } catch (IOException e) {
+      logger.fatal("Failed to PrepareBackup in " + member, e);
+    }
+  }
+
+}
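
PrepareBackupOperation takes over the reply handling that the old admin.internal PrepareBackupRequest kept in a nested reply processor: the coordinator builds the operation, sends the request to the other members, runs the prepare step locally, and collects each member's PersistentIDs. A rough sketch of the intended coordinator-side call, not part of this patch; the accessor used to obtain the recipient set is an assumption:

    Map<DistributedMember, Set<PersistentID>> prepareForBackup(DM dm, InternalCache cache) {
      // every other member of the distributed system (assumed accessor)
      Set<InternalDistributedMember> recipients = dm.getOtherDistributionManagerIds();
      // runs the prepare step locally and remotely; the result maps each member to the
      // persistent disk stores it will include in the backup
      return new PrepareBackupOperation(dm, dm.getDistributionManagerId(), cache, recipients).send();
    }
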
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessor.java
new file mode 100644
index 0000000..9d1a1fb
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessor.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.util.Set;
+
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor;
+
+class PrepareBackupReplyProcessor extends AdminMultipleReplyProcessor {
+
+  private final BackupResultCollector resultCollector;
+
+  PrepareBackupReplyProcessor(BackupResultCollector resultCollector, DM dm, Set<InternalDistributedMember> recipients) {
+    super(dm, recipients);
+    this.resultCollector = resultCollector;
+  }
+
+  @Override
+  protected boolean stopBecauseOfExceptions() {
+    return false;
+  }
+
+  @Override
+  protected void process(DistributionMessage message, boolean warn) {
+    if (message instanceof PrepareBackupResponse) {
+      PrepareBackupResponse response = (PrepareBackupResponse) message;
+      resultCollector.addToResults(response.getSender(), response.getPersistentIds());
+    }
+    super.process(message, warn);
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupRequest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupRequest.java
new file mode 100644
index 0000000..ab7c8af
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupRequest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.admin.remote.AdminFailureResponse;
+import org.apache.geode.internal.admin.remote.AdminResponse;
+import org.apache.geode.internal.admin.remote.CliLegacyMessage;
+import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.internal.logging.log4j.LocalizedMessage;
+
+/**
+ * A request from an admin VM to all non-admin members to start a backup. In the prepare phase of
+ * the backup, the members suspend bucket destroys to make sure no buckets are missed during
+ * the backup.
+ */
+public class PrepareBackupRequest extends CliLegacyMessage {
+  private static final Logger logger = LogService.getLogger();
+
+  private PrepareBackupFactory prepareBackupFactory;
+
+  public PrepareBackupRequest() {
+    // nothing
+  }
+
+  PrepareBackupRequest(InternalDistributedMember sender, Set<InternalDistributedMember> recipients, int msgId, PrepareBackupFactory prepareBackupFactory) {
+    setSender(sender);
+    setRecipients(recipients);
+    this.msgId = msgId;
+    this.prepareBackupFactory = prepareBackupFactory;
+  }
+
+  @Override
+  protected AdminResponse createResponse(DM dm) {
+    HashSet<PersistentID> persistentIds;
+    try {
+      persistentIds = prepareBackupFactory.createPrepareBackup(dm.getDistributionManagerId(), dm.getCache()).run();
+    } catch (IOException e) {
+      logger.error(LocalizedMessage.create(LocalizedStrings.CliLegacyMessage_ERROR, getClass()), e);
+      return AdminFailureResponse.create(getSender(), e);
+    }
+    return prepareBackupFactory.createPrepareBackupResponse(getSender(), persistentIds);
+  }
+
+  @Override
+  public int getDSFID() {
+    return PREPARE_BACKUP_REQUEST;
+  }
+
+}
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupResponse.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupResponse.java
similarity index 60%
rename from geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupResponse.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupResponse.java
index 745e3ed..ed4e28d 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/PrepareBackupResponse.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupResponse.java
@@ -1,23 +1,26 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.Set;
 
 import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.persistence.PersistentID;
@@ -26,8 +29,6 @@ import org.apache.geode.internal.admin.remote.AdminResponse;
 
 /**
  * The response to the {@link PrepareBackupRequest}
- *
- *
  */
 public class PrepareBackupResponse extends AdminResponse {
 
@@ -39,11 +40,11 @@ public class PrepareBackupResponse extends AdminResponse {
 
   public PrepareBackupResponse(InternalDistributedMember sender,
       HashSet<PersistentID> persistentIds) {
-    this.setRecipient(sender);
+    setRecipient(sender);
     this.persistentIds = persistentIds;
   }
 
-  public HashSet<PersistentID> getPersistentIds() {
+  public Set<PersistentID> getPersistentIds() {
     return persistentIds;
   }
 
@@ -53,22 +54,13 @@ public class PrepareBackupResponse extends AdminResponse {
     persistentIds = DataSerializer.readHashSet(in);
   }
 
-
-
   @Override
   public void toData(DataOutput out) throws IOException {
     super.toData(out);
     DataSerializer.writeHashSet(persistentIds, out);
   }
 
-
-
   @Override
-  protected Object clone() throws CloneNotSupportedException {
-    // TODO Auto-generated method stub
-    return super.clone();
-  }
-
   public int getDSFID() {
     return PREPARE_BACKUP_RESPONSE;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/RestoreScript.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
similarity index 87%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/RestoreScript.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
index a3bd455..a9e53fa 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/RestoreScript.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedWriter;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/ScriptGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/ScriptGenerator.java
similarity index 52%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/ScriptGenerator.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/ScriptGenerator.java
index 286caa1..368b8d4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/ScriptGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/ScriptGenerator.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedWriter;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixBackupInspector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixBackupInspector.java
similarity index 69%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixBackupInspector.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixBackupInspector.java
index 2e0921b..642fe3f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixBackupInspector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixBackupInspector.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedReader;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixScriptGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
similarity index 68%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixScriptGenerator.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
index a7969e1..c71e8ac 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/UnixScriptGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
@@ -1,18 +1,23 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.internal.cache.backup.RestoreScript;
+import org.apache.geode.internal.cache.backup.ScriptGenerator;
 
 import java.io.BufferedWriter;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsBackupInspector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsBackupInspector.java
similarity index 73%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsBackupInspector.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsBackupInspector.java
index 13d4959..85372ad 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsBackupInspector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsBackupInspector.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedReader;
 import java.io.File;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsScriptGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsScriptGenerator.java
similarity index 80%
rename from geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsScriptGenerator.java
rename to geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsScriptGenerator.java
index 5327ae0..7a5306a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/WindowsScriptGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/WindowsScriptGenerator.java
@@ -1,23 +1,24 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
-import java.io.PrintWriter;
 
 class WindowsScriptGenerator implements ScriptGenerator {
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
index 2319776..168eb84 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
@@ -103,7 +103,7 @@ import org.apache.geode.distributed.internal.membership.InternalDistributedMembe
 import org.apache.geode.i18n.LogWriterI18n;
 import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.SystemTimer;
-import org.apache.geode.internal.cache.BackupManager;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.geode.internal.cache.CacheConfig;
 import org.apache.geode.internal.cache.CachePerfStats;
 import org.apache.geode.internal.cache.CacheServerImpl;
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java b/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
index 1bf5215..d48c89f 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
@@ -44,8 +44,8 @@ import javax.management.ObjectName;
 import org.apache.commons.lang.StringUtils;
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.admin.internal.BackupDataStoreHelper;
-import org.apache.geode.admin.internal.BackupDataStoreResult;
+import org.apache.geode.internal.cache.backup.BackupDataStoreHelper;
+import org.apache.geode.internal.cache.backup.BackupDataStoreResult;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java b/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
index 0afdfd1..96096ba 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
@@ -63,7 +63,7 @@ import org.apache.geode.distributed.internal.locks.DLockStats;
 import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.GemFireVersion;
 import org.apache.geode.internal.PureJavaMode;
-import org.apache.geode.internal.cache.BackupManager;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.geode.internal.cache.CachePerfStats;
 import org.apache.geode.internal.cache.DirectoryHolder;
 import org.apache.geode.internal.cache.DiskDirectoryStats;
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
index 26f7d8b..bd89918 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
@@ -25,7 +25,7 @@ import org.springframework.shell.core.annotation.CliOption;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.internal.cache.BackupUtil;
+import org.apache.geode.internal.cache.backup.BackupUtil;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.management.BackupStatus;
 import org.apache.geode.management.cli.CliMetaData;
diff --git a/geode-core/src/test/java/org/apache/geode/admin/internal/PrepareBackupRequestTest.java b/geode-core/src/test/java/org/apache/geode/admin/internal/PrepareBackupRequestTest.java
deleted file mode 100644
index 4be5e5d..0000000
--- a/geode-core/src/test/java/org/apache/geode/admin/internal/PrepareBackupRequestTest.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.admin.internal;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.InOrder;
-
-import org.apache.geode.admin.internal.PrepareBackupRequest.PrepareBackupReplyProcessor;
-import org.apache.geode.cache.CacheClosedException;
-import org.apache.geode.cache.persistence.PersistentID;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.distributed.internal.ReplyException;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.admin.remote.AdminFailureResponse;
-import org.apache.geode.internal.admin.remote.AdminResponse;
-import org.apache.geode.internal.cache.BackupManager;
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.test.junit.categories.UnitTest;
-
-@Category(UnitTest.class)
-public class PrepareBackupRequestTest {
-
-  private PrepareBackupRequest prepareBackupRequest;
-
-  private PrepareBackupReplyProcessor replyProcessor;
-  private DM dm;
-  private InternalCache cache;
-  private BackupManager backupManager;
-
-  private InternalDistributedMember localMember;
-  private InternalDistributedMember member1;
-  private InternalDistributedMember member2;
-
-  private Set<InternalDistributedMember> recipients;
-
-  @Before
-  public void setUp() throws Exception {
-    // mocks here
-    replyProcessor = mock(PrepareBackupReplyProcessor.class);
-    dm = mock(DM.class);
-    cache = mock(InternalCache.class);
-    backupManager = mock(BackupManager.class);
-
-    when(dm.getCache()).thenReturn(cache);
-    when(dm.getDistributionManagerId()).thenReturn(localMember);
-    when(cache.startBackup(any())).thenReturn(backupManager);
-    when(replyProcessor.getResults()).thenReturn(Collections.emptyMap());
-
-    localMember = mock(InternalDistributedMember.class);
-    member1 = mock(InternalDistributedMember.class);
-    member2 = mock(InternalDistributedMember.class);
-
-    recipients = new HashSet<>();
-    recipients.add(member1);
-    recipients.add(member2);
-
-    prepareBackupRequest = new PrepareBackupRequest(dm, recipients, replyProcessor);
-  }
-
-  @Test
-  public void getRecipientsReturnsRecipientMembers() throws Exception {
-    assertThat(prepareBackupRequest.getRecipients()).hasSize(2).contains(member1, member2);
-  }
-
-  @Test
-  public void getRecipientsDoesNotIncludeNull() throws Exception {
-    InternalDistributedMember nullMember = null;
-
-    assertThat(prepareBackupRequest.getRecipients()).doesNotContain(nullMember);
-  }
-
-  @Test
-  public void sendShouldUseDMToSendMessage() throws Exception {
-    prepareBackupRequest.send();
-
-    verify(dm, times(1)).putOutgoing(prepareBackupRequest);
-  }
-
-  @Test
-  public void sendShouldWaitForRepliesFromRecipients() throws Exception {
-    prepareBackupRequest.send();
-
-    verify(replyProcessor, times(1)).waitForReplies();
-  }
-
-  @Test
-  public void sendShouldReturnResultsContainingRecipientsAndLocalMember() throws Exception {
-    Set<PersistentID> localMember_PersistentIdSet = new HashSet<>();
-    localMember_PersistentIdSet.add(mock(PersistentID.class));
-    Set<PersistentID> member1_PersistentIdSet = new HashSet<>();
-    member1_PersistentIdSet.add(mock(PersistentID.class));
-    Set<PersistentID> member2_PersistentIdSet = new HashSet<>();
-    member2_PersistentIdSet.add(mock(PersistentID.class));
-    member2_PersistentIdSet.add(mock(PersistentID.class));
-    Map<DistributedMember, Set<PersistentID>> expectedResults = new HashMap<>();
-    expectedResults.put(localMember, localMember_PersistentIdSet);
-    expectedResults.put(member1, member1_PersistentIdSet);
-    expectedResults.put(member2, member2_PersistentIdSet);
-    when(replyProcessor.getResults()).thenReturn(expectedResults);
-
-    Map<DistributedMember, Set<PersistentID>> results = prepareBackupRequest.send();
-
-    assertThat(results).isEqualTo(expectedResults);
-  }
-
-  @Test
-  public void sendShouldInvokeProcessLocally() throws Exception {
-    prepareBackupRequest.send();
-
-    verify(replyProcessor, times(1)).process(any(AdminResponse.class), eq(false));
-  }
-
-  @Test
-  public void sendShouldInvokePrepareForBackup() throws Exception {
-    prepareBackupRequest.send();
-
-    verify(backupManager, times(1)).prepareForBackup();
-  }
-
-  @Test
-  public void sendShouldPrepareForBackupInLocalMemberBeforeWaitingForReplies() throws Exception {
-    InOrder inOrder = inOrder(backupManager, replyProcessor);
-
-    prepareBackupRequest.send();
-
-    // assert that prepareForBackup is invoked before invoking waitForReplies
-    inOrder.verify(backupManager, times(1)).prepareForBackup();
-    inOrder.verify(replyProcessor, times(1)).waitForReplies();
-  }
-
-  @Test
-  public void repliesWithFinishBackupResponse() throws Exception {
-    prepareBackupRequest.send();
-
-    verify(replyProcessor, times(1)).process(any(PrepareBackupResponse.class), eq(false));
-  }
-
-  @Test
-  public void repliesWithAdminFailureResponseWhenPrepareForBackupThrowsIOException()
-      throws Exception {
-    prepareBackupRequest = spy(prepareBackupRequest);
-    doThrow(new IOException()).when(prepareBackupRequest).prepareForBackup(dm);
-
-    prepareBackupRequest.send();
-
-    verify(replyProcessor, times(1)).process(any(AdminFailureResponse.class), eq(false));
-  }
-
-  @Test
-  public void sendShouldCompleteIfWaitForRepliesThrowsReplyExceptionCausedByCacheClosedException()
-      throws Exception {
-    doThrow(new ReplyException(new CacheClosedException())).when(replyProcessor).waitForReplies();
-
-    prepareBackupRequest.send();
-  }
-
-  @Test
-  public void sendShouldThrowIfWaitForRepliesThrowsReplyExceptionNotCausedByCacheClosedException()
-      throws Exception {
-    doThrow(new ReplyException(new NullPointerException())).when(replyProcessor).waitForReplies();
-
-    assertThatThrownBy(() -> prepareBackupRequest.send()).isInstanceOf(ReplyException.class)
-        .hasCauseInstanceOf(NullPointerException.class);
-  }
-
-  @Test
-  public void sendCompletesWhenWaitForRepliesThrowsInterruptedException() throws Exception {
-    doThrow(new InterruptedException()).when(replyProcessor).waitForReplies();
-
-    prepareBackupRequest.send();
-  }
-
-}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
similarity index 56%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/BackupDUnitTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
index 5d0cce9..e8c186b 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
@@ -1,49 +1,32 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
+import static org.apache.geode.test.dunit.Host.getHost;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.nio.file.Files;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Pattern;
-
 import org.apache.commons.io.FileUtils;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.admin.internal.PrepareBackupRequest;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.DiskStore;
@@ -61,46 +44,86 @@ import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
 import org.apache.geode.distributed.internal.ReplyMessage;
 import org.apache.geode.internal.admin.remote.AdminFailureResponse;
+import org.apache.geode.internal.cache.DestroyRegionOperation.DestroyRegionMessage;
+import org.apache.geode.internal.cache.DiskRegion;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.partitioned.PersistentPartitionedRegionTestBase;
 import org.apache.geode.management.BackupStatus;
-import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.DUnitEnv;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+
+/**
+ * Additional tests to consider adding:
+ * <ul>
+ * <li>Test default disk store.
+ * <li>Test backing up and recovering while a bucket move is in progress.
+ * <li>Test backing up and recovering while ops are in progress?
+ * </ul>
+ */
 @Category(DistributedTest.class)
-public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
-  Logger logger = LogManager.getLogger(BackupDUnitTest.class);
+@RunWith(JUnitParamsRunner.class)
+@SuppressWarnings("serial")
+public class BackupDistributedTest extends PersistentPartitionedRegionTestBase {
 
-  private static final long MAX_WAIT_SECONDS = 30;
   private VM vm0;
   private VM vm1;
+  private VM vm2;
+  private VM vm3;
+
+  @Before
+  public void setUp() throws Exception {
+    vm0 = getHost(0).getVM(0);
+    vm1 = getHost(0).getVM(1);
+    vm2 = getHost(0).getVM(2);
+    vm3 = getHost(0).getVM(3);
+  }
 
   @Override
   public final void preTearDownCacheTestCase() throws Exception {
+    vm0.invoke(() -> {
+      DistributionMessageObserver.setInstance(null);
+      disconnectFromDS();
+    });
+
     StringBuilder failures = new StringBuilder();
     delete(getBackupDir(), failures);
     if (failures.length() > 0) {
-      logger.error(failures.toString());
+      System.err.println(failures.toString());
     }
   }
 
   @Test
-  public void testBackupPR() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    logger.info("Creating region in VM0");
+  public void testBackupPR() throws Exception {
     createPersistentRegion(vm0);
-    logger.info("Creating region in VM1");
     createPersistentRegion(vm1);
 
     long lm0 = setBackupFiles(vm0);
@@ -110,11 +133,12 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     createData(vm0, 0, 5, "B", "region2");
 
     BackupStatus status = backup(vm2);
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     Collection<File> files = FileUtils.listFiles(getBackupDir(), new String[] {"txt"}, true);
-    assertEquals(4, files.size());
+    assertThat(files).hasSize(4);
+
     deleteOldUserUserFile(vm0);
     deleteOldUserUserFile(vm1);
     validateBackupComplete();
@@ -122,22 +146,14 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     createData(vm0, 0, 5, "C", "region1");
     createData(vm0, 0, 5, "C", "region2");
 
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     closeCache(vm0);
     closeCache(vm1);
 
-    // Destroy the current data
-    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
-      public void run() {
-        try {
-          cleanDiskDirs();
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
+    // destroy the current data
+    cleanDiskDirsInEveryVM();
 
     restoreBackup(2);
 
@@ -150,44 +166,35 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
   }
 
   /**
-   * Test of bug 42419
+   * Test of bug 42419.
+   *
+   * <p>
+   * TRAC 42419: backed up disk stores map contains null key instead of member; cannot restore
+   * backup files
    */
   @Test
-  public void testBackupFromMemberWithDiskStore() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-
-    logger.info("Creating region in VM0");
+  public void testBackupFromMemberWithDiskStore() throws Exception {
     createPersistentRegion(vm0);
-    logger.info("Creating region in VM1");
     createPersistentRegion(vm1);
 
     createData(vm0, 0, 5, "A", "region1");
     createData(vm0, 0, 5, "B", "region2");
 
     BackupStatus status = backup(vm1);
-    assertEquals(2, status.getBackedUpDiskStores().size());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+
     for (DistributedMember key : status.getBackedUpDiskStores().keySet()) {
-      assertNotNull(key);
+      assertThat(key).isNotNull();
     }
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     validateBackupComplete();
 
     closeCache(vm0);
     closeCache(vm1);
 
-    // Destroy the current data
-    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
-      public void run() {
-        try {
-          cleanDiskDirs();
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
+    // destroy the current data
+    invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
 
     restoreBackup(2);
 
@@ -197,34 +204,21 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     checkData(vm0, 0, 5, "B", "region2");
   }
 
-  private void createPersistentRegionsAsync() throws java.util.concurrent.ExecutionException,
-      InterruptedException, java.util.concurrent.TimeoutException {
-    logger.info("Creating region in VM0");
-    AsyncInvocation async0 = createPersistentRegionAsync(vm0);
-    logger.info("Creating region in VM1");
-    AsyncInvocation async1 = createPersistentRegionAsync(vm1);
-    async0.get(MAX_WAIT_SECONDS, TimeUnit.SECONDS);
-    async1.get(MAX_WAIT_SECONDS, TimeUnit.SECONDS);
-  }
-
   /**
    * Test for bug 42419
+   *
+   * <p>
+   * TRAC 42419: backed up disk stores map contains null key instead of member; cannot restore
+   * backup files
    */
   @Test
-  public void testBackupWhileBucketIsCreated() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-    final VM vm2 = host.getVM(2);
-
-    logger.info("Creating region in VM0");
+  public void testBackupWhileBucketIsCreated() throws Exception {
     createPersistentRegion(vm0);
 
     // create a bucket on vm0
     createData(vm0, 0, 1, "A", "region1");
 
     // create the pr on vm1, which won't have any buckets
-    logger.info("Creating region in VM1");
     createPersistentRegion(vm1);
 
     CompletableFuture<BackupStatus> backupStatusFuture =
@@ -234,29 +228,21 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     CompletableFuture.allOf(backupStatusFuture, createDataFuture);
 
     BackupStatus status = backupStatusFuture.get();
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     validateBackupComplete();
 
     createData(vm0, 0, 5, "C", "region1");
 
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     closeCache(vm0);
     closeCache(vm1);
 
-    // Destroy the current data
-    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
-      public void run() {
-        try {
-          cleanDiskDirs();
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
+    // destroy the current data
+    invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
 
     restoreBackup(2);
 
@@ -265,93 +251,81 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     checkData(vm0, 0, 1, "A", "region1");
   }
 
+  /**
+   * Test for bug 42420. Invoke a backup when a bucket is in the middle of being moved.
+   *
+   * <p>
+   * TRAC 42420: Online backup files sometimes cannot be restored
+   */
   @Test
-  public void testBackupWhileBucketIsMovedBackupBeforeSendDestroy() throws Throwable {
-    Host host = Host.getHost(0);
-    final VM vm2 = host.getVM(2);
+  @Parameters({"BEFORE_SENDING_DESTROYREGIONMESSAGE", "BEFORE_PROCESSING_REPLYMESSAGE"})
+  @TestCaseName("{method}({params})")
+  public void testWhileBucketIsMovedBackup(final WhenToInvokeBackup whenToInvokeBackup)
+      throws Exception {
+    vm0.invoke("Add listener to invoke backup", () -> {
+      disconnectFromDS();
 
-    DistributionMessageObserver observer = new SerializableDistributionMessageObserver() {
-      private volatile boolean done;
+      // This listener will wait for a response to the
+      // destroy region message, and then trigger a backup.
+      // The backup will then run before this member has finished
+      // destroying a bucket, but after the peer has removed the bucket.
+      DistributionMessageObserver.setInstance(createTestHookToBackup(whenToInvokeBackup));
+    });
 
-      @Override
-      public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {
+    createPersistentRegion(vm0);
 
-        // The bucket move will send a destroy region message.
-        if (msg instanceof DestroyRegionOperation.DestroyRegionMessage && !done) {
-          backup(vm2);
-          done = true;
-        }
-      }
-    };
+    // create two buckets on vm0
+    createData(vm0, 0, 2, "A", "region1");
 
-    backupWhileBucketIsMoved(observer);
-  }
+    // create the pr on vm1, which won't have any buckets
+    createPersistentRegion(vm1);
 
-  @Test
-  public void testBackupWhileBucketIsMovedBackupAfterSendDestroy() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-    final VM vm2 = host.getVM(2);
+    // Perform a rebalance. This will trigger the backup in the middle of the bucket move.
+    vm0.invoke("Do rebalance", () -> {
+      RebalanceOperation op = getCache().getResourceManager().createRebalanceFactory().start();
+      RebalanceResults results;
+      try {
+        results = op.getResults();
+        assertEquals(1, results.getTotalBucketTransfersCompleted());
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      }
+    });
 
-    DistributionMessageObserver observer = new SerializableDistributionMessageObserver() {
-      private volatile boolean done;
-      private AtomicInteger count = new AtomicInteger();
-      private volatile int replyId = -0xBAD;
+    validateBackupComplete();
 
-      @Override
-      public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {
+    createData(vm0, 0, 5, "C", "region1");
 
-        // The bucket move will send a destroy region message.
-        if (msg instanceof DestroyRegionOperation.DestroyRegionMessage && !done) {
-          this.replyId = msg.getProcessorId();
-        }
-      }
+    closeCache(vm0);
+    closeCache(vm1);
 
-      @Override
-      public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
-        if (message instanceof ReplyMessage && replyId != -0xBAD
-            && replyId == message.getProcessorId() && !done
-        // we need two replies
-            && count.incrementAndGet() == 2) {
-          backup(vm2);
-          done = true;
-        }
+    // Destroy the current data
+    invokeInEveryVM("Clean disk dirs", () -> {
+      try {
+        cleanDiskDirs();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
       }
-    };
+    });
+
+    restoreBackup(2);
+
+    createPersistentRegionsAsync();
 
-    backupWhileBucketIsMoved(observer);
+    checkData(vm0, 0, 2, "A", "region1");
   }
 
   @Test
-  public void testBackupStatusCleanedUpAfterFailureOnOneMember() throws Throwable {
-    IgnoredException.addIgnoredException("Uncaught exception");
-    IgnoredException.addIgnoredException("Stop processing");
-    Host host = Host.getHost(0);
-    final VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    final VM vm2 = host.getVM(2);
-
-    // Create an observer that will fail a backup
-    // When this member receives a prepare
-    DistributionMessageObserver observer = new SerializableDistributionMessageObserver() {
-      @Override
-      public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
-        if (message instanceof PrepareBackupRequest) {
-          DistributionMessageObserver.setInstance(null);
-          IOException exception = new IOException("Backup in progess");
-          AdminFailureResponse response =
-              AdminFailureResponse.create(message.getSender(), exception);
-          response.setMsgId(((PrepareBackupRequest) message).getMsgId());
-          dm.putOutgoing(response);
-          throw new RuntimeException("Stop processing");
-        }
-      }
-    };
+  public void testBackupStatusCleanedUpAfterFailureOnOneMember() throws Exception {
+    addIgnoredException("Uncaught exception");
+    addIgnoredException("Stop processing");
+
+    String exceptionMessage = "Backup in progress";
 
     vm0.invoke(() -> {
       disconnectFromDS();
-      DistributionMessageObserver.setInstance(observer);
+      // create an observer that will fail a backup when this member receives a prepare
+      DistributionMessageObserver.setInstance(
+          createTestHookToThrowIOExceptionBeforeProcessingPrepareBackupRequest(exceptionMessage));
     });
 
     createPersistentRegion(vm0);
@@ -360,149 +334,37 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     createData(vm0, 0, 5, "A", "region1");
     createData(vm0, 0, 5, "B", "region2");
 
-    try {
-      backup(vm2);
-      fail("Backup should have failed with in progress exception");
-    } catch (Exception expected) {
-      // that's ok, hte backup should have failed
-    }
+    assertThatThrownBy(() -> backup(vm2)).hasRootCauseInstanceOf(IOException.class);
 
-    // A second backup should succeed because the observer
-    // has been cleared and the backup state should be cleared.
+    // second backup should succeed because the observer and backup state have been cleared
     BackupStatus status = backup(vm2);
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
-
-
-  }
-
-  /**
-   * Test for bug 42420. Invoke a backup when a bucket is in the middle of being moved.
-   *
-   * @param observer - a message observer that triggers at the backup at the correct time.
-   */
-  private void backupWhileBucketIsMoved(final DistributionMessageObserver observer)
-      throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-
-    vm0.invoke(new SerializableRunnable("Add listener to invoke backup") {
-
-      public void run() {
-        disconnectFromDS();
-
-        // This listener will wait for a response to the
-        // destroy region message, and then trigger a backup.
-        // That will backup before this member has finished destroying
-        // a bucket, but after the peer has removed the bucket.
-        DistributionMessageObserver.setInstance(observer);
-      }
-    });
-    try {
-
-      logger.info("Creating region in VM0");
-      createPersistentRegion(vm0);
-
-      // create twos bucket on vm0
-      createData(vm0, 0, 2, "A", "region1");
-
-      // create the pr on vm1, which won't have any buckets
-      logger.info("Creating region in VM1");
-
-      createPersistentRegion(vm1);
-
-      // Perform a rebalance. This will trigger the backup in the middle
-      // of the bucket move.
-      vm0.invoke(new SerializableRunnable("Do rebalance") {
-
-        public void run() {
-          Cache cache = getCache();
-          RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
-          RebalanceResults results;
-          try {
-            results = op.getResults();
-            assertEquals(1, results.getTotalBucketTransfersCompleted());
-          } catch (Exception e) {
-            Assert.fail("interupted", e);
-          }
-        }
-      });
-
-      validateBackupComplete();
-
-      createData(vm0, 0, 5, "C", "region1");
-
-      closeCache(vm0);
-      closeCache(vm1);
-
-      // Destroy the current data
-      Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
-        public void run() {
-          try {
-            cleanDiskDirs();
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-        }
-      });
-
-      restoreBackup(2);
-
-      createPersistentRegionsAsync();
-
-      checkData(vm0, 0, 2, "A", "region1");
-    } finally {
-      // cleanup the distribution message observer
-      vm0.invoke(new SerializableRunnable() {
-        public void run() {
-          DistributionMessageObserver.setInstance(null);
-          disconnectFromDS();
-        }
-      });
-    }
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
   }
 
   /**
    * Make sure we don't report members without persistent data as backed up.
    */
   @Test
-  public void testBackupOverflow() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    logger.info("Creating region in VM0");
+  public void testBackupOverflow() throws Exception {
     createPersistentRegion(vm0);
-    logger.info("Creating region in VM1");
     createOverflowRegion(vm1);
 
     createData(vm0, 0, 5, "A", "region1");
     createData(vm0, 0, 5, "B", "region2");
 
     BackupStatus status = backup(vm2);
-    assertEquals("Backed up disk stores  " + status, 1, status.getBackedUpDiskStores().size());
-    assertEquals(2, status.getBackedUpDiskStores().values().iterator().next().size());
-    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());
+    assertThat(status.getBackedUpDiskStores()).hasSize(1);
+    assertThat(status.getBackedUpDiskStores().values().iterator().next()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).isEmpty();
 
     validateBackupComplete();
-
   }
 
   @Test
-  public void testBackupPRWithOfflineMembers() throws Throwable {
-    Host host = Host.getHost(0);
-    vm0 = host.getVM(0);
-    vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    logger.info("Creating region in VM0");
+  public void testBackupPRWithOfflineMembers() throws Exception {
     createPersistentRegion(vm0);
-    logger.info("Creating region in VM1");
     createPersistentRegion(vm1);
-    logger.info("Creating region in VM2");
     createPersistentRegion(vm2);
 
     createData(vm0, 0, 5, "A", "region1");
@@ -511,15 +373,93 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     closeCache(vm2);
 
     BackupStatus status = backup(vm3);
-    assertEquals(2, status.getBackedUpDiskStores().size());
-    assertEquals(2, status.getOfflineDiskStores().size());
+    assertThat(status.getBackedUpDiskStores()).hasSize(2);
+    assertThat(status.getOfflineDiskStores()).hasSize(2);
   }
 
-  // TODO
-  // Test default disk store.
-  // Test backing up and recovering while a bucket move is in progress.
-  // Test backing up and recovering while ops are in progress?
+  private DistributionMessageObserver createTestHookToBackup(
+      WhenToInvokeBackup backupInvocationTestHook) {
+    switch (backupInvocationTestHook) {
+      case BEFORE_SENDING_DESTROYREGIONMESSAGE:
+        return createTestHookToBackupBeforeSendingDestroyRegionMessage(() -> backup(vm2));
+      case BEFORE_PROCESSING_REPLYMESSAGE:
+        return createTestHookToBackupBeforeProcessingReplyMessage(() -> backup(vm2));
+      default:
+        throw new AssertionError("Invalid backupInvocationTestHook " + backupInvocationTestHook);
+    }
+  }
 
+  private DistributionMessageObserver createTestHookToBackupBeforeProcessingReplyMessage(
+      Runnable task) {
+    return new DistributionMessageObserver() {
+      private volatile boolean done;
+      private final AtomicInteger count = new AtomicInteger();
+      private volatile int replyId = -0xBAD;
+
+      @Override
+      public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
+        // the bucket move will send a destroy region message.
+        if (message instanceof DestroyRegionMessage && !done) {
+          this.replyId = message.getProcessorId();
+        }
+      }
+
+      @Override
+      public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
+        if (message instanceof ReplyMessage && replyId != -0xBAD
+            && replyId == message.getProcessorId() && !done && count.incrementAndGet() == 2) {
+          task.run();
+          done = true;
+        }
+      }
+    };
+  }
+
+  private DistributionMessageObserver createTestHookToBackupBeforeSendingDestroyRegionMessage(
+      Runnable task) {
+    return new DistributionMessageObserver() {
+      private volatile boolean done;
+
+      @Override
+      public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
+        // the bucket move will send a destroy region message.
+        if (message instanceof DestroyRegionMessage && !done) {
+          task.run();
+          done = true;
+        }
+      }
+    };
+  }
+
+  private void cleanDiskDirsInEveryVM() {
+    invokeInEveryVM("cleanDiskDirsInEveryVM", () -> {
+      try {
+        cleanDiskDirs();
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    });
+  }
+
+  private DistributionMessageObserver createTestHookToThrowIOExceptionBeforeProcessingPrepareBackupRequest(final String exceptionMessage) {
+    return new DistributionMessageObserver() {
+      @Override
+      public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
+        if (message instanceof PrepareBackupRequest) {
+          DistributionMessageObserver.setInstance(null);
+          IOException exception = new IOException(exceptionMessage);
+          AdminFailureResponse response =
+              AdminFailureResponse.create(message.getSender(), exception);
+          response.setMsgId(((PrepareBackupRequest) message).getMsgId());
+          dm.putOutgoing(response);
+          throw new RuntimeException("Stop processing"); // TODO:KIRK: what is this?
+        }
+      }
+    };
+  }
+
+  private void createPersistentRegionsAsync() throws ExecutionException, InterruptedException {
+    AsyncInvocation async0 = createPersistentRegionAsync(vm0);
+    AsyncInvocation async1 = createPersistentRegionAsync(vm1);
+    async0.await();
+    async1.await();
+  }
 
   private void validateBackupComplete() {
     File backupDir = getBackupDir();
@@ -529,19 +469,13 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     assertTrue(files.length == 0);
   }
 
-  private void createPersistentRegion(VM vm) throws Throwable {
-    AsyncInvocation future = createPersistentRegionAsync(vm);
-    future.get(MAX_WAIT_SECONDS, TimeUnit.SECONDS);
-    if (future.isAlive()) {
-      fail("Region not created within" + MAX_WAIT_SECONDS);
-    }
-    if (future.exceptionOccurred()) {
-      throw new RuntimeException(future.getException());
-    }
+  private void createPersistentRegion(VM vm) throws Exception {
+    createPersistentRegionAsync(vm).await();
   }
 
   private void deleteOldUserUserFile(final VM vm) {
     SerializableRunnable validateUserFileBackup = new SerializableRunnable("set user backups") {
+      @Override
       public void run() {
         try {
           FileUtils.deleteDirectory(new File("userbackup_" + vm.getId()));
@@ -555,6 +489,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
   private long setBackupFiles(final VM vm) {
     SerializableCallable setUserBackups = new SerializableCallable("set user backups") {
+      @Override
       public Object call() {
         final int pid = DUnitEnv.get().getPid();
         File vmdir = new File("userbackup_" + pid);
@@ -587,6 +522,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
   private void verifyUserFileRestored(VM vm, final long lm) {
     vm.invoke(new SerializableRunnable() {
+      @Override
       public void run() {
         final int pid = DUnitEnv.get().getPid();
         File vmdir = new File("userbackup_" + pid);
@@ -615,6 +551,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
   private AsyncInvocation createPersistentRegionAsync(final VM vm) {
     SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {
+      @Override
       public void run() {
         Cache cache = getCache();
         DiskStoreFactory dsf = cache.createDiskStoreFactory();
@@ -644,6 +581,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
   private void createOverflowRegion(final VM vm) {
     SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {
+      @Override
       public void run() {
         Cache cache = getCache();
         DiskStoreFactory dsf = cache.createDiskStoreFactory();
@@ -663,14 +601,17 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     vm.invoke(createRegion);
   }
 
+  @Override
   protected void createData(VM vm, final int startKey, final int endKey, final String value) {
-    createData(vm, startKey, endKey, value, PR_REGION_NAME);
+    createData(vm, startKey, endKey, value, getPartitionedRegionName());
   }
 
+  @Override
   protected void createData(VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
     SerializableRunnable createData = new SerializableRunnable() {
 
+      @Override
       public void run() {
         Cache cache = getCache();
         Region region = cache.getRegion(regionName);
@@ -683,14 +624,17 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     vm.invoke(createData);
   }
 
-  protected void checkData(VM vm0, final int startKey, final int endKey, final String value) {
-    checkData(vm0, startKey, endKey, value, PR_REGION_NAME);
+  @Override
+  protected void checkData(VM vm, final int startKey, final int endKey, final String value) {
+    checkData(vm, startKey, endKey, value, getPartitionedRegionName());
   }
 
-  protected void checkData(VM vm0, final int startKey, final int endKey, final String value,
+  @Override
+  protected void checkData(VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
     SerializableRunnable checkData = new SerializableRunnable() {
 
+      @Override
       public void run() {
         Cache cache = getCache();
         Region region = cache.getRegion(regionName);
@@ -701,11 +645,13 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
       }
     };
 
-    vm0.invoke(checkData);
+    vm.invoke(checkData);
   }
 
+  @Override
   protected void closeCache(final VM vm) {
     SerializableRunnable closeCache = new SerializableRunnable("close cache") {
+      @Override
       public void run() {
         Cache cache = getCache();
         cache.close();
@@ -714,13 +660,16 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     vm.invoke(closeCache);
   }
 
-  protected Set<Integer> getBucketList(VM vm0) {
-    return getBucketList(vm0, PR_REGION_NAME);
+  @Override
+  protected Set<Integer> getBucketList(VM vm) {
+    return getBucketList(vm, getPartitionedRegionName());
   }
 
-  protected Set<Integer> getBucketList(VM vm0, final String regionName) {
+  @Override
+  protected Set<Integer> getBucketList(VM vm, final String regionName) {
     SerializableCallable getBuckets = new SerializableCallable("get buckets") {
 
+      @Override
       public Object call() throws Exception {
         Cache cache = getCache();
         PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
@@ -728,7 +677,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
       }
     };
 
-    return (Set<Integer>) vm0.invoke(getBuckets);
+    return (Set<Integer>) vm.invoke(getBuckets);
   }
 
   private File[] getDiskDirs(String dsName) {
@@ -743,9 +692,22 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     return DataPolicy.PERSISTENT_PARTITION;
   }
 
-  private static class SerializableDistributionMessageObserver extends DistributionMessageObserver
-      implements Serializable {
-
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
+      }
+    });
   }
 
   /**
@@ -754,8 +716,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
    * much as possible.
    */
   public static void delete(File file, StringBuilder failures) {
-    if (!file.exists())
+    if (!file.exists()) {
       return;
+    }
 
     if (file.isDirectory()) {
       File[] fileList = file.listFiles();
@@ -775,4 +738,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
       }
     }
   }
+
+  enum WhenToInvokeBackup {
+    BEFORE_SENDING_DESTROYREGIONMESSAGE,
+    BEFORE_PROCESSING_REPLYMESSAGE
+  }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupInspectorIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupInspectorIntegrationTest.java
similarity index 90%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupInspectorIntegrationTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupInspectorIntegrationTest.java
index 704a6e1..75e5daf 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupInspectorIntegrationTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupInspectorIntegrationTest.java
@@ -1,35 +1,36 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
+import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TemporaryFolder;
 
-import org.apache.geode.test.junit.categories.IntegrationTest;
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * Tests for the BackupInspector.
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
similarity index 89%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/BackupJUnitTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
index af85980..e229272 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
@@ -1,24 +1,48 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
 import static org.apache.geode.distributed.ConfigurationProperties.CACHE_XML_FILE;
 import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
 import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
 import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.filefilter.DirectoryFileFilter;
+import org.apache.commons.io.filefilter.RegexFileFilter;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -35,26 +59,8 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.concurrent.CompletableFuture;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.DirectoryFileFilter;
-import org.apache.commons.io.filefilter.RegexFileFilter;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.DataPolicy;
-import org.apache.geode.cache.DiskStore;
-import org.apache.geode.cache.DiskStoreFactory;
-import org.apache.geode.cache.EvictionAction;
-import org.apache.geode.cache.EvictionAttributes;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionFactory;
-import org.apache.geode.test.junit.categories.IntegrationTest;
-
 @Category(IntegrationTest.class)
-public class BackupJUnitTest {
+public class BackupIntegrationTest {
 
   private static final String DISK_STORE_NAME = "diskStore";
   private GemFireCacheImpl cache = null;
@@ -68,7 +74,7 @@ public class BackupJUnitTest {
   private final Random random = new Random();
 
   private String getName() {
-    return "BackupJUnitTest_" + System.identityHashCode(this);
+    return "BackupIntegrationTest_" + System.identityHashCode(this);
   }
 
   @Before
@@ -79,7 +85,7 @@ public class BackupJUnitTest {
       String tmpDirName = System.getProperty("java.io.tmpdir");
       tmpDir = new File(tmpDirName == null ? "" : tmpDirName);
       try {
-        URL url = BackupJUnitTest.class.getResource("BackupJUnitTest.cache.xml");
+        URL url = BackupIntegrationTest.class.getResource("BackupIntegrationTest.cache.xml");
         cacheXmlFile = new File(url.toURI().getPath());
       } catch (URISyntaxException e) {
         throw new ExceptionInInitializerError(e);
@@ -99,7 +105,7 @@ public class BackupJUnitTest {
     diskDirs[1].mkdir();
   }
 
-  private void createCache() throws IOException {
+  private void createCache() {
     cache = (GemFireCacheImpl) new CacheFactory(props).create();
   }
 
@@ -119,15 +125,15 @@ public class BackupJUnitTest {
   }
 
   @Test
-  public void testBackupAndRecover() throws IOException, InterruptedException {
+  public void testBackupAndRecover() throws Exception {
     backupAndRecover(() -> {
       createDiskStore();
-      return BackupJUnitTest.this.createRegion();
+      return createRegion();
     });
   }
 
   @Test
-  public void testBackupAndRecoverOldConfig() throws IOException, InterruptedException {
+  public void testBackupAndRecoverOldConfig() throws Exception {
     backupAndRecover(() -> {
       createDiskStore();
       RegionFactory regionFactory = cache.createRegionFactory();
@@ -227,7 +233,7 @@ public class BackupJUnitTest {
 
 
   @Test
-  public void testBackupEmptyDiskStore() throws IOException, InterruptedException {
+  public void testBackupEmptyDiskStore() throws Exception {
     createDiskStore();
 
     BackupManager backup =
@@ -239,7 +245,7 @@ public class BackupJUnitTest {
   }
 
   @Test
-  public void testBackupOverflowOnlyDiskStore() throws IOException, InterruptedException {
+  public void testBackupOverflowOnlyDiskStore() throws Exception {
     createDiskStore();
     Region region = createOverflowRegion();
     // Put another key to make sure we restore
@@ -258,7 +264,7 @@ public class BackupJUnitTest {
 
 
   @Test
-  public void testCompactionDuringBackup() throws IOException, InterruptedException {
+  public void testCompactionDuringBackup() throws Exception {
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     dsf.setDiskDirs(diskDirs);
     dsf.setMaxOplogSize(1);
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupLockTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupLockTest.java
similarity index 76%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/BackupLockTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupLockTest.java
index a2939eb..79efdb5 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BackupLockTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupLockTest.java
@@ -1,35 +1,33 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.awaitility.Awaitility.await;
 
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import org.apache.geode.test.junit.categories.UnitTest;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.test.junit.categories.UnitTest;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 @Category(UnitTest.class)
 public class BackupLockTest {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupPrepareAndFinishMsgDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
similarity index 88%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupPrepareAndFinishMsgDUnitTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
index 2dfc16c..12f4335 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/BackupPrepareAndFinishMsgDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
@@ -1,47 +1,25 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
-import org.awaitility.Awaitility;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.admin.internal.FinishBackupRequest;
-import org.apache.geode.admin.internal.PrepareBackupRequest;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.DiskStore;
 import org.apache.geode.cache.DiskStoreFactory;
@@ -55,13 +33,32 @@ import org.apache.geode.cache.query.QueryInvocationTargetException;
 import org.apache.geode.cache.query.TypeMismatchException;
 import org.apache.geode.cache30.CacheTestCase;
 import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.internal.cache.BackupLock;
 import org.apache.geode.internal.cache.DiskStoreImpl;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.awaitility.Awaitility;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
 
 @Category({DistributedTest.class})
 public abstract class BackupPrepareAndFinishMsgDUnitTest extends CacheTestCase {
+
   // Although this test does not make use of other members, the current member needs to be
   // a distributed member (rather than local) because it sends prepare and finish backup messages
   private static final String TEST_REGION_NAME = "TestRegion";
@@ -141,7 +138,7 @@ public abstract class BackupPrepareAndFinishMsgDUnitTest extends CacheTestCase {
     DM dm = GemFireCacheImpl.getInstance().getDistributionManager();
     Set recipients = dm.getOtherDistributionManagerIds();
     Future<Void> future = null;
-    PrepareBackupRequest.send(dm, recipients);
+    new PrepareBackupOperation(dm, dm.getId(), dm.getCache(), recipients).send();
     waitingForBackupLockCount = 0;
     future = CompletableFuture.runAsync(function);
     Awaitility.await().atMost(5, TimeUnit.SECONDS)
@@ -153,7 +150,7 @@ public abstract class BackupPrepareAndFinishMsgDUnitTest extends CacheTestCase {
   private void doReadActionsAndVerifyCompletion() {
     DM dm = GemFireCacheImpl.getInstance().getDistributionManager();
     Set recipients = dm.getOtherDistributionManagerIds();
-    PrepareBackupRequest.send(dm, recipients);
+    new PrepareBackupOperation(dm, dm.getId(), dm.getCache(), recipients).send();
     waitingForBackupLockCount = 0;
     List<CompletableFuture<?>> futureList = doReadActions();
     CompletableFuture.allOf(futureList.toArray(new CompletableFuture<?>[futureList.size()]));
diff --git a/geode-core/src/test/java/org/apache/geode/admin/internal/FinishBackupRequestTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupRequestTest.java
similarity index 89%
rename from geode-core/src/test/java/org/apache/geode/admin/internal/FinishBackupRequestTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupRequestTest.java
index 2bbd90f..f90e68e 100644
--- a/geode-core/src/test/java/org/apache/geode/admin/internal/FinishBackupRequestTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupRequestTest.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
@@ -25,20 +27,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.InOrder;
-
-import org.apache.geode.admin.internal.FinishBackupRequest.FinishBackupReplyProcessor;
 import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
@@ -47,9 +35,20 @@ import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.admin.remote.AdminFailureResponse;
 import org.apache.geode.internal.admin.remote.AdminResponse;
-import org.apache.geode.internal.cache.BackupManager;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.InOrder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 
 @Category(UnitTest.class)
 public class FinishBackupRequestTest {
diff --git a/geode-core/src/test/java/org/apache/geode/admin/internal/FlushToDiskRequestTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
similarity index 87%
rename from geode-core/src/test/java/org/apache/geode/admin/internal/FlushToDiskRequestTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
index e16430f..97fb747 100644
--- a/geode-core/src/test/java/org/apache/geode/admin/internal/FlushToDiskRequestTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
@@ -1,18 +1,20 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.admin.internal;
+package org.apache.geode.internal.cache.backup;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
@@ -25,17 +27,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.InOrder;
-
-import org.apache.geode.admin.internal.FlushToDiskRequest.FlushToDiskProcessor;
 import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.DiskStore;
 import org.apache.geode.distributed.internal.DM;
@@ -44,6 +35,15 @@ import org.apache.geode.distributed.internal.membership.InternalDistributedMembe
 import org.apache.geode.internal.admin.remote.AdminResponse;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.InOrder;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
 
 @Category(UnitTest.class)
 public class FlushToDiskRequestTest {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/IncrementalBackupDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
similarity index 96%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/IncrementalBackupDUnitTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
index cc245f6..deeacd0 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/IncrementalBackupDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
@@ -1,42 +1,30 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache;
+package org.apache.geode.internal.cache.backup;
 
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.junit.Assert.*;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.file.Files;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.filefilter.DirectoryFileFilter;
 import org.apache.commons.io.filefilter.RegexFileFilter;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
 import org.apache.geode.admin.AdminDistributedSystem;
 import org.apache.geode.admin.internal.AdminDistributedSystemImpl;
 import org.apache.geode.cache.Cache;
@@ -51,6 +39,8 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.internal.ClassPathLoader;
 import org.apache.geode.internal.DeployedJar;
+import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.util.IOUtils;
 import org.apache.geode.internal.util.TransformUtils;
 import org.apache.geode.management.BackupStatus;
@@ -65,13 +55,29 @@ import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.file.Files;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Pattern;
 
 /**
  * Tests for the incremental backup feature.
  */
 @Category(DistributedTest.class)
 @SuppressWarnings("serial")
-public class IncrementalBackupDUnitTest extends JUnit4CacheTestCase {
+public class IncrementalBackupDistributedTest extends JUnit4CacheTestCase {
   /**
    * Data load increment.
    */
@@ -124,7 +130,7 @@ public class IncrementalBackupDUnitTest extends JUnit4CacheTestCase {
    * @param message a message to log.
    */
   private void log(String message) {
-    LogWriterUtils.getLogWriter().info("[IncrementalBackupDUnitTest] " + message);
+    LogWriterUtils.getLogWriter().info("[IncrementalBackupDistributedTest] " + message);
   }
 
   /**
@@ -354,7 +360,7 @@ public class IncrementalBackupDUnitTest extends JUnit4CacheTestCase {
 
       @Override
       public String description() {
-        return "[IncrementalBackupDUnitTest] Waiting for missing member " + id;
+        return "[IncrementalBackupDistributedTest] Waiting for missing member " + id;
       }
     }, 10000, 500, false);
 
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PartitionedBackupPrepareAndFinishMsgDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PartitionedBackupPrepareAndFinishMsgDUnitTest.java
new file mode 100644
index 0000000..bd931d9
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PartitionedBackupPrepareAndFinishMsgDUnitTest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+
+public class PartitionedBackupPrepareAndFinishMsgDUnitTest
+    extends BackupPrepareAndFinishMsgDUnitTest {
+
+  @Override
+  public Region<Integer, Integer> createRegion() {
+    return createRegion(RegionShortcut.PARTITION_PERSISTENT);
+  }
+}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
new file mode 100644
index 0000000..c202ab8
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.geode.CancelCriterion;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.HashSet;
+import java.util.Set;
+
+@Category(UnitTest.class)
+public class PrepareBackupFactoryTest {
+
+  private PrepareBackupFactory prepareBackupFactory;
+
+  private BackupResultCollector resultCollector;
+  private DM dm;
+  private InternalDistributedMember sender;
+  private Set<InternalDistributedMember> recipients;
+  private InternalDistributedMember member;
+  private InternalCache cache;
+
+  @Before
+  public void setUp() throws Exception {
+    resultCollector = mock(BackupResultCollector.class);
+    dm = mock(DM.class);
+    sender = mock(InternalDistributedMember.class);
+    member = mock(InternalDistributedMember.class);
+    cache = mock(InternalCache.class);
+
+    recipients = new HashSet<>();
+
+    when(dm.getSystem()).thenReturn(mock(InternalDistributedSystem.class));
+    when(dm.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
+
+    prepareBackupFactory = new PrepareBackupFactory();
+  }
+
+  @Test
+  public void createReplyProcessorReturnsPrepareBackupReplyProcessor() throws Exception {
+    assertThat(prepareBackupFactory.createReplyProcessor(resultCollector, dm, recipients))
+        .isInstanceOf(PrepareBackupReplyProcessor.class);
+  }
+
+  @Test
+  public void createRequestReturnsPrepareBackupRequest() throws Exception {
+    assertThat(prepareBackupFactory.createRequest(sender, recipients, 1, prepareBackupFactory))
+        .isInstanceOf(PrepareBackupRequest.class);
+  }
+
+  @Test
+  public void createPrepareBackupReturnsPrepareBackup() throws Exception {
+    assertThat(prepareBackupFactory.createPrepareBackup(member, cache))
+        .isInstanceOf(PrepareBackup.class);
+  }
+}
\ No newline at end of file
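
The factory exercised above is a small creation seam: judging from the constructors used elsewhere in these tests, each method simply news up the matching collaborator so that PrepareBackupOperation and PrepareBackupRequest can be unit tested against a mocked factory. A rough sketch of that shape (assumed, not the committed source; the PrepareBackup constructor arguments in particular are an assumption):

    // Assumed shape of the creation seam tested above -- not the committed source.
    class PrepareBackupFactory {
      PrepareBackupReplyProcessor createReplyProcessor(BackupResultCollector resultCollector,
          DM dm, Set<InternalDistributedMember> recipients) {
        // constructor as used in PrepareBackupReplyProcessorTest
        return new PrepareBackupReplyProcessor(resultCollector, dm, recipients);
      }

      PrepareBackupRequest createRequest(InternalDistributedMember sender,
          Set<InternalDistributedMember> recipients, int processorId,
          PrepareBackupFactory prepareBackupFactory) {
        // constructor as used in PrepareBackupRequestTest
        return new PrepareBackupRequest(sender, recipients, processorId, prepareBackupFactory);
      }

      PrepareBackup createPrepareBackup(InternalDistributedMember member, InternalCache cache) {
        return new PrepareBackup(member, cache); // assumed constructor
      }
    }
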
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
new file mode 100644
index 0000000..800b976
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.geode.annotations.TestingOnly;
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+@Category(UnitTest.class)
+public class PrepareBackupOperationTest {
+
+  private DM dm;
+  private InternalCache cache;
+  private Set<InternalDistributedMember> recipients;
+
+  private InternalDistributedMember sender;
+  private InternalDistributedMember member1;
+  private InternalDistributedMember member2;
+
+  private PrepareBackupFactory prepareBackupFactory;
+  private PrepareBackupReplyProcessor prepareBackupReplyProcessor;
+  private PrepareBackupRequest prepareBackupRequest;
+  private PrepareBackup prepareBackup;
+
+  private PrepareBackupOperation prepareBackupOperation;
+
+  @Before
+  public void setUp() throws Exception {
+    dm = mock(DM.class);
+    cache = mock(InternalCache.class);
+
+    prepareBackupReplyProcessor = mock(PrepareBackupReplyProcessor.class);
+    prepareBackupRequest = mock(PrepareBackupRequest.class);
+    prepareBackup = mock(PrepareBackup.class);
+
+    prepareBackupFactory = mock(PrepareBackupFactory.class);
+
+    sender = mock(InternalDistributedMember.class, "sender");
+    member1 = mock(InternalDistributedMember.class, "member1");
+    member2 = mock(InternalDistributedMember.class, "member2");
+    recipients = new HashSet<>();
+
+    prepareBackupOperation = new PrepareBackupOperation(dm, sender, cache, recipients, prepareBackupFactory);
+
+    when(prepareBackupReplyProcessor.getProcessorId()).thenReturn(42);
+
+    when(prepareBackupFactory.createReplyProcessor(eq(prepareBackupOperation), eq(dm), eq(recipients))).thenReturn(prepareBackupReplyProcessor);
+    when(prepareBackupFactory.createRequest(eq(sender), eq(recipients), eq(42), eq(prepareBackupFactory))).thenReturn(prepareBackupRequest);
+    when(prepareBackupFactory.createPrepareBackup(eq(sender), eq(cache))).thenReturn(prepareBackup);
+  }
+
+  @Test
+  public void sendShouldSendPrepareBackupMessage() throws Exception {
+    prepareBackupOperation.send();
+
+    verify(dm, times(1)).putOutgoing(prepareBackupRequest);
+  }
+
+  @Test
+  public void sendReturnsResultsForRemoteRecipient() throws Exception {
+    HashSet<PersistentID> persistentIdsForMember1 = new HashSet<>();
+    persistentIdsForMember1.add(mock(PersistentID.class));
+    doAnswer(invokeAddToResults(new MemberWithPersistentIds(member1, persistentIdsForMember1)))
+        .when(prepareBackupReplyProcessor).waitForReplies();
+
+    assertThat(prepareBackupOperation.send()).containsOnlyKeys(member1).containsValues(persistentIdsForMember1);
+  }
+
+  @Test
+  public void sendReturnsResultsForLocalMember() throws Exception {
+    HashSet<PersistentID> persistentIdsForSender = new HashSet<>();
+    persistentIdsForSender.add(mock(PersistentID.class));
+    when(prepareBackup.run()).thenReturn(persistentIdsForSender);
+
+    assertThat(prepareBackupOperation.send()).containsOnlyKeys(sender).containsValue(persistentIdsForSender);
+  }
+
+  @Test
+  public void sendReturnsResultsForAllMembers() throws Exception {
+    HashSet<PersistentID> persistentIdsForMember1 = new HashSet<>();
+    persistentIdsForMember1.add(mock(PersistentID.class));
+
+    HashSet<PersistentID> persistentIdsForMember2 = new HashSet<>();
+    persistentIdsForMember2.add(mock(PersistentID.class));
+
+    MemberWithPersistentIds[] ids = new MemberWithPersistentIds[] {
+        new MemberWithPersistentIds(member1, persistentIdsForMember1),
+        new MemberWithPersistentIds(member2, persistentIdsForMember2) };
+
+    doAnswer(invokeAddToResults(ids)).when(prepareBackupReplyProcessor)
+        .waitForReplies();
+
+//    prepareBackupOperation.addToResults(ids[0].member, ids[0].persistentIds);
+//    prepareBackupOperation.addToResults(ids[1].member, ids[1].persistentIds);
+
+    HashSet<PersistentID> persistentIdsForSender = new HashSet<>();
+    persistentIdsForSender.add(mock(PersistentID.class));
+    when(prepareBackup.run()).thenReturn(persistentIdsForSender);
+
+    assertThat(prepareBackupOperation.send()).containsOnlyKeys(member1, member2, sender)
+        .containsValues(persistentIdsForSender, persistentIdsForMember1, persistentIdsForMember2);
+  }
+
+  @Test
+  public void needTestsForAddToResults() throws Exception {
+    fail("needTestsForAddToResults");
+  }
+
+  @Test
+  public void processLocallyThrowsIOException() throws Exception {
+    fail("processLocallyThrowsIOException");
+  }
+
+  @Test
+  public void processLocallyThrowsNonIOException() throws Exception {
+    fail("processLocallyThrowsNonIOException");
+  }
+
+  @Test
+  public void sendShouldPrepareForBackupInLocalMemberBeforeWaitingForReplies() throws Exception {
+    fail("sendShouldPrepareForBackupInLocalMemberBeforeWaitingForReplies");
+//    InOrder inOrder = inOrder(backupManager, replyProcessor);
+
+//    prepareBackupRequest.send();
+
+    // assert that prepareForBackup is invoked before invoking waitForReplies
+//    inOrder.verify(backupManager, times(1)).prepareForBackup();
+//    inOrder.verify(replyProcessor, times(1)).waitForReplies();
+  }
+
+  private Answer<Object> invokeAddToResults(MemberWithPersistentIds... memberWithPersistentIds) {
+    return invocation -> {
+      for (MemberWithPersistentIds ids : memberWithPersistentIds) {
+        prepareBackupOperation.addToResults(ids.member, ids.persistentIds);
+      }
+      return null;
+    };
+  }
+
+  private static class MemberWithPersistentIds {
+    InternalDistributedMember member;
+    HashSet<PersistentID> persistentIds;
+    MemberWithPersistentIds(InternalDistributedMember member, HashSet<PersistentID> persistentIds) {
+      this.member = member;
+      this.persistentIds = persistentIds;
+    }
+  }
+}
\ No newline at end of file
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessorTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessorTest.java
new file mode 100644
index 0000000..7f22f75
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupReplyProcessorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.geode.CancelCriterion;
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.HashSet;
+import java.util.Set;
+
+@Category(UnitTest.class)
+public class PrepareBackupReplyProcessorTest {
+
+  private PrepareBackupReplyProcessor prepareBackupReplyProcessor;
+
+  private BackupResultCollector resultCollector;
+  private DM dm;
+  private InternalDistributedSystem system;
+  private InternalDistributedMember sender;
+
+  private Set<InternalDistributedMember> recipients;
+  private Set<PersistentID> persistentIds;
+
+  private PrepareBackupResponse prepareBackupResponse;
+  private DistributionMessage nonPrepareBackupResponse;
+
+  @Before
+  public void setUp() throws Exception {
+    resultCollector = mock(BackupResultCollector.class);
+    dm = mock(DM.class);
+    system = mock(InternalDistributedSystem.class);
+    prepareBackupResponse = mock(PrepareBackupResponse.class);
+    nonPrepareBackupResponse = mock(DistributionMessage.class);
+    sender = mock(InternalDistributedMember.class);
+
+    recipients = new HashSet<>();
+    persistentIds = new HashSet<>();
+
+    when(dm.getSystem()).thenReturn(system);
+    when(dm.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
+    when(prepareBackupResponse.getSender()).thenReturn(sender);
+    when(prepareBackupResponse.getPersistentIds()).thenReturn(persistentIds);
+    when(nonPrepareBackupResponse.getSender()).thenReturn(sender);
+
+    prepareBackupReplyProcessor = new PrepareBackupReplyProcessor(resultCollector, dm, recipients);
+  }
+
+  @Test
+  public void stopBecauseOfExceptionsReturnsFalse() throws Exception {
+    assertThat(prepareBackupReplyProcessor.stopBecauseOfExceptions()).isFalse();
+  }
+
+  @Test
+  public void processPrepareBackupResponseAddsSenderToResults() throws Exception {
+    prepareBackupReplyProcessor.process(prepareBackupResponse, false);
+
+    verify(resultCollector, times(1)).addToResults(eq(sender), eq(persistentIds));
+  }
+
+  @Test
+  public void processNonPrepareBackupResponseDoesNotAddSenderToResults() throws Exception {
+    prepareBackupReplyProcessor.process(nonPrepareBackupResponse, false);
+
+    verify(resultCollector, times(0)).addToResults(eq(sender), eq(persistentIds));
+  }
+}
\ No newline at end of file
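
These tests pin down the reply handling: only a PrepareBackupResponse contributes to the result collector, and exceptions from individual members do not stop reply processing. A sketch of the behaviour they describe (assumed, not the committed source; the two-argument process signature is taken from the calls above, and the placement of the super call is an assumption):

    // Assumed shape of the behaviour verified above -- not the committed source.
    @Override
    protected boolean stopBecauseOfExceptions() {
      return false; // keep collecting replies even if some members report failures
    }

    @Override
    protected void process(DistributionMessage message, boolean warn) {
      if (message instanceof PrepareBackupResponse) {
        PrepareBackupResponse response = (PrepareBackupResponse) message;
        resultCollector.addToResults(response.getSender(), response.getPersistentIds());
      }
      super.process(message, warn);
    }
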
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
new file mode 100644
index 0000000..59f7daa
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.geode.cache.persistence.PersistentID;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.admin.remote.AdminFailureResponse;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+@Category(UnitTest.class)
+public class PrepareBackupRequestTest {
+
+  private PrepareBackupRequest prepareBackupRequest;
+
+  private DM dm;
+  private Set<InternalDistributedMember> recipients;
+  private int msgId;
+  private PrepareBackupFactory prepareBackupFactory;
+  private InternalDistributedMember sender;
+  private InternalCache cache;
+  private HashSet<PersistentID> persistentIds;
+  private PrepareBackup prepareBackup;
+
+  @Before
+  public void setUp() throws Exception {
+    dm = mock(DM.class);
+    sender = mock(InternalDistributedMember.class);
+    cache = mock(InternalCache.class);
+    prepareBackupFactory = mock(PrepareBackupFactory.class);
+    prepareBackup = mock(PrepareBackup.class);
+
+    msgId = 42;
+    recipients = new HashSet<>();
+    persistentIds = new HashSet<>();
+
+    when(dm.getCache()).thenReturn(cache);
+    when(dm.getDistributionManagerId()).thenReturn(sender);
+    when(prepareBackupFactory.createPrepareBackup(eq(sender), eq(cache))).thenReturn(prepareBackup);
+    when(prepareBackupFactory.createPrepareBackupResponse(eq(sender), eq(persistentIds))).thenReturn(mock(PrepareBackupResponse.class));
+    when(prepareBackup.run()).thenReturn(persistentIds);
+
+    prepareBackupRequest = new PrepareBackupRequest(sender, recipients, msgId, prepareBackupFactory);
+  }
+
+  @Test
+  public void usesFactoryToCreatePrepareBackup() throws Exception {
+    prepareBackupRequest.createResponse(dm);
+
+    verify(prepareBackupFactory, times(1)).createPrepareBackup(eq(sender), eq(cache));
+  }
+
+  @Test
+  public void usesFactoryToCreatePrepareBackupResponse() throws Exception {
+    prepareBackupRequest.createResponse(dm);
+
+    verify(prepareBackupFactory, times(1)).createPrepareBackupResponse(eq(sender), eq(persistentIds));
+  }
+
+  @Test
+  public void returnsPrepareBackupResponse() throws Exception {
+    assertThat(prepareBackupRequest.createResponse(dm)).isInstanceOf(PrepareBackupResponse.class);
+  }
+
+  @Test
+  public void returnsAdminFailureResponseWhenPrepareBackupThrowsIOException() throws Exception {
+    when(prepareBackup.run()).thenThrow(new IOException());
+
+    assertThat(prepareBackupRequest.createResponse(dm)).isInstanceOf(AdminFailureResponse.class);
+  }
+
+}
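
Taken together, these tests describe how the request builds its reply: ask the factory for a PrepareBackup task for the local member, run it, and wrap the resulting persistent IDs in a PrepareBackupResponse, falling back to an AdminFailureResponse when the task throws an IOException. A sketch of that collaboration under those assumptions (buildResponse and failureResponse are hypothetical names; the committed method and the failure-response construction may differ):

    // Assumed wiring, matching the stubs and verifications above -- not the committed source.
    AdminResponse buildResponse(DM dm) {            // hypothetical stand-in for createResponse(dm)
      InternalDistributedMember localMember = dm.getDistributionManagerId();
      try {
        HashSet<PersistentID> persistentIds =
            prepareBackupFactory.createPrepareBackup(localMember, dm.getCache()).run();
        return prepareBackupFactory.createPrepareBackupResponse(localMember, persistentIds);
      } catch (IOException e) {
        return failureResponse(e);                  // hypothetical: yields an AdminFailureResponse
      }
    }
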
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/ReplicateBackupPrepareAndFinishMsgDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/ReplicateBackupPrepareAndFinishMsgDUnitTest.java
new file mode 100644
index 0000000..d8a7622
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/ReplicateBackupPrepareAndFinishMsgDUnitTest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+
+public class ReplicateBackupPrepareAndFinishMsgDUnitTest
+    extends BackupPrepareAndFinishMsgDUnitTest {
+
+  @Override
+  public Region<Integer, Integer> createRegion() {
+    return createRegion(RegionShortcut.REPLICATE_PERSISTENT);
+  }
+}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/UnixScriptGeneratorTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/UnixScriptGeneratorTest.java
similarity index 83%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/persistence/UnixScriptGeneratorTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/UnixScriptGeneratorTest.java
index b665b64..df832a6 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/UnixScriptGeneratorTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/UnixScriptGeneratorTest.java
@@ -1,34 +1,35 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.util.List;
-
+import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TemporaryFolder;
 
-import org.apache.geode.test.junit.categories.IntegrationTest;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.List;
 
 @Category(IntegrationTest.class)
 public class UnixScriptGeneratorTest {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/WindowsScriptGeneratorTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/WindowsScriptGeneratorTest.java
similarity index 84%
rename from geode-core/src/test/java/org/apache/geode/internal/cache/persistence/WindowsScriptGeneratorTest.java
rename to geode-core/src/test/java/org/apache/geode/internal/cache/backup/WindowsScriptGeneratorTest.java
index fba97bc..265036c 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/WindowsScriptGeneratorTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/WindowsScriptGeneratorTest.java
@@ -1,34 +1,35 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-package org.apache.geode.internal.cache.persistence;
+package org.apache.geode.internal.cache.backup;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.util.List;
-
+import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TemporaryFolder;
 
-import org.apache.geode.test.junit.categories.IntegrationTest;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.List;
 
 @Category(IntegrationTest.class)
 public class WindowsScriptGeneratorTest {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistPRKRFDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistPRKRFDUnitTest.java
index edc068f..e8fb004 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistPRKRFDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistPRKRFDUnitTest.java
@@ -21,6 +21,7 @@ import java.util.Properties;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.Declarable;
@@ -28,16 +29,17 @@ import org.apache.geode.cache.EntryEvent;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.util.CacheWriterAdapter;
 import org.apache.geode.cache30.CacheSerializableRunnable;
+import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.DiskStoreImpl;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.ThreadUtils;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
@@ -66,7 +68,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     createData(vm0, 0, 10, "a");
     vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         // let the region to hold on the put until diskstore is closed
         if (!DiskStoreImpl.KRF_DEBUG) {
           region.getAttributesMutator().setCacheWriter(new MyWriter());
@@ -77,7 +79,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     // create test
     AsyncInvocation async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async create") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.put(10, "b");
@@ -111,7 +113,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     createPR(vm0, 0);
     vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         // let the region to hold on the put until diskstore is closed
         if (!DiskStoreImpl.KRF_DEBUG) {
           region.getAttributesMutator().setCacheWriter(new MyWriter());
@@ -120,7 +122,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     });
     async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async update") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.put(1, "b");
@@ -154,7 +156,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     createPR(vm0, 0);
     vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         // let the region to hold on the put until diskstore is closed
         if (!DiskStoreImpl.KRF_DEBUG) {
           region.getAttributesMutator().setCacheWriter(new MyWriter());
@@ -163,7 +165,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     });
     async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async destroy") {
       public void run2() throws CacheException {
-        Region region = getRootRegion(PR_REGION_NAME);
+        Region region = getRootRegion(getPartitionedRegionName());
         IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.destroy(2, "b");
@@ -197,6 +199,24 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     closeCache(vm0);
   }
 
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
+      }
+    });
+  }
+
   private static class MyWriter extends CacheWriterAdapter implements Declarable {
     public MyWriter() {}
 
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index c526fc2..d66bb84 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -64,6 +64,7 @@ import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.cache.ColocationLogger;
+import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.InitialImageOperation.RequestImageMessage;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.PartitionedRegionHelper;
@@ -192,9 +193,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
         paf.setColocatedWith("region2");
@@ -212,13 +213,13 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
     createData(vm0, 0, NUM_BUCKETS, "c", "region3");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
     assertEquals(vm0Buckets, getBucketList(vm0, "region3"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
     assertEquals(vm1Buckets, getBucketList(vm1, "region3"));
-    Set<Integer> vm2Buckets = getBucketList(vm2, PR_REGION_NAME);
+    Set<Integer> vm2Buckets = getBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
     assertEquals(vm2Buckets, getBucketList(vm2, "region3"));
 
@@ -236,9 +237,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
 
     // The secondary buckets can be recovered asynchronously,
     // so wait for them to come back.
-    waitForBuckets(vm0, vm0Buckets, PR_REGION_NAME);
+    waitForBuckets(vm0, vm0Buckets, getPartitionedRegionName());
     waitForBuckets(vm0, vm0Buckets, "region2");
-    waitForBuckets(vm1, vm1Buckets, PR_REGION_NAME);
+    waitForBuckets(vm1, vm1Buckets, getPartitionedRegionName());
     waitForBuckets(vm1, vm1Buckets, "region2");
 
     checkData(vm0, 0, NUM_BUCKETS, "a");
@@ -294,8 +295,8 @@ public class PersistentColocatedPartitionedRegionDUnitTest
   private SerializableRunnable createPRsColocatedPairThread =
       new SerializableRunnable("create2PRs") {
         public void run() {
-          createPR(PR_REGION_NAME, true);
-          createPR("region2", PR_REGION_NAME, true);
+          createPR(getPartitionedRegionName(), true);
+          createPR("region2", getPartitionedRegionName(), true);
         }
       };
 
@@ -303,9 +304,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
       new SerializableRunnable("create multiple child PRs") {
         @Override
         public void run() throws Exception {
-          createPR(PR_REGION_NAME, true);
+          createPR(getPartitionedRegionName(), true);
           for (int i = 2; i < numChildPRs + 2; ++i) {
-            createPR("region" + i, PR_REGION_NAME, true);
+            createPR("region" + i, getPartitionedRegionName(), true);
           }
         }
       };
@@ -314,8 +315,8 @@ public class PersistentColocatedPartitionedRegionDUnitTest
       new SerializableRunnable("create PR colocation hierarchy") {
         @Override
         public void run() throws Exception {
-          createPR(PR_REGION_NAME, true);
-          createPR("region2", PR_REGION_NAME, true);
+          createPR(getPartitionedRegionName(), true);
+          createPR("region2", getPartitionedRegionName(), true);
           for (int i = 3; i < numChildPRGenerations + 2; ++i) {
             createPR("region" + i, "region" + (i - 1), true);
           }
@@ -330,7 +331,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           try {
             // Skip creation of first region - expect region2 creation to fail
             // createPR(PR_REGION_NAME, true);
-            createPR("region2", PR_REGION_NAME, true);
+            createPR("region2", getPartitionedRegionName(), true);
           } catch (Exception e) {
             ex = e;
             exClass = e.getClass().toString();
@@ -362,7 +363,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           try {
             // Skip creation of first region - expect region2 creation to fail
             // createPR(PR_REGION_NAME, true);
-            createPR("region2", PR_REGION_NAME, true);
+            createPR("region2", getPartitionedRegionName(), true);
           } catch (Exception e) {
             ex = e;
             exClass = e.getClass().toString();
@@ -394,7 +395,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
 
           AtomicBoolean isDone = new AtomicBoolean(false);
           try {
-            createPR(PR_REGION_NAME, true);
+            createPR(getPartitionedRegionName(), true);
             // Let this thread continue running long enough for the missing region to be logged a
             // couple times.
             // Child regions do not get created by this thread.
@@ -437,14 +438,14 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           List<LogEvent> logEvents = Collections.emptyList();
 
           try {
-            createPR(PR_REGION_NAME, true);
+            createPR(getPartitionedRegionName(), true);
             // Delay creation of second (i.e child) region to see missing colocated region log
             // message (logInterval/2 < delay < logInterval)
             await().atMost(MAX_WAIT, TimeUnit.MILLISECONDS).until(() -> {
               verify(mockAppender, times(1)).append(loggingEventCaptor.capture());
             });
             logEvents = loggingEventCaptor.getAllValues();
-            createPR("region2", PR_REGION_NAME, true);
+            createPR("region2", getPartitionedRegionName(), true);
             // Another delay before exiting the thread to make sure that missing region logging
             // doesn't continue after missing region is created (delay > logInterval)
             await().atMost(logInterval * 2, TimeUnit.MILLISECONDS).until(() -> {
@@ -485,13 +486,13 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           List<LogEvent> logEvents = Collections.emptyList();
           int numLogEvents = 0;
 
-          createPR(PR_REGION_NAME, true);
+          createPR(getPartitionedRegionName(), true);
           // Delay creation of child generation regions to see missing colocated region log message
           // parent region is generation 1, child region is generation 2, grandchild is 3, etc.
           for (int generation = 2; generation < (numChildPRGenerations + 2); ++generation) {
             String childPRName = "region" + generation;
             String colocatedWithRegionName =
-                generation == 2 ? PR_REGION_NAME : "region" + (generation - 1);
+                generation == 2 ? getPartitionedRegionName() : "region" + (generation - 1);
             loggingEventCaptor = ArgumentCaptor.forClass(LogEvent.class);
 
             // delay between starting generations of child regions until the expected missing
@@ -551,7 +552,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           List<LogEvent> logEvents = Collections.emptyList();
           int numLogEvents = 0;
 
-          createPR(PR_REGION_NAME, true);
+          createPR(getPartitionedRegionName(), true);
           // Delay creation of child generation regions to see missing colocated region log message
           for (int regionNum = 2; regionNum < (numChildPRs + 2); ++regionNum) {
             String childPRName = "region" + regionNum;
@@ -570,7 +571,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
             assertEquals("Expected warning messages to be logged.", regionNum - 1, numLogEvents);
 
             // Start the child region
-            createPR(childPRName, PR_REGION_NAME, true);
+            createPR(childPRName, getPartitionedRegionName(), true);
           }
           String logMsg = "";
           logEvents = loggingEventCaptor.getAllValues();
@@ -616,8 +617,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           List<LogEvent> logEvents = Collections.emptyList();
 
           try {
-            createPR(PR_REGION_NAME, true);
-            createPR("region2", PR_REGION_NAME, true); // This child region is never created
+            createPR(getPartitionedRegionName(), true);
+            createPR("region2", getPartitionedRegionName(), true); // This child region is never
+                                                                   // created
             // Let this thread continue running long enough for the missing region to be logged a
             // couple times.
             // Grandchild region does not get created by this thread. (1.5*logInterval < delay <
@@ -640,6 +642,24 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         }
       };
 
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
+      }
+    });
+  }
+
   private class ColocationLoggerIntervalSetter extends SerializableRunnable {
     private int logInterval;
 
@@ -742,10 +762,10 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
 
     closeCache(vm0);
@@ -778,10 +798,10 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
 
     closeCache(vm0);
@@ -836,10 +856,10 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
 
     closeCache(vm0);
@@ -881,10 +901,10 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
 
     closeCache(vm0);
@@ -931,9 +951,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
     createData(vm0, 0, NUM_BUCKETS, "c", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertFalse(vm1Buckets.isEmpty());
     for (int i = 2; i < numChildPRs + 2; ++i) {
       String childName = "region" + i;
@@ -986,9 +1006,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
     createData(vm0, 0, NUM_BUCKETS, "c", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertFalse(vm1Buckets.isEmpty());
     for (int i = 2; i < numChildPRs + 2; ++i) {
       String childName = "region" + i;
@@ -1041,9 +1061,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
     createData(vm0, 0, NUM_BUCKETS, "c", "region3");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertFalse(vm1Buckets.isEmpty());
     for (int i = 2; i < numChildGenerations + 2; ++i) {
       String childName = "region" + i;
@@ -1096,9 +1116,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
     createData(vm0, 0, NUM_BUCKETS, "c", "region3");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertFalse(vm0Buckets.isEmpty());
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertFalse(vm1Buckets.isEmpty());
     for (int i = 2; i < numChildGenerations + 2; ++i) {
       String childName = "region" + i;
@@ -1332,7 +1352,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
     SerializableRunnable createChildPR = getCreateChildPRRunnable();
@@ -1346,18 +1366,18 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
-    Set<Integer> vm2Buckets = getBucketList(vm2, PR_REGION_NAME);
+    Set<Integer> vm2Buckets = getBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
 
-    Set<Integer> vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0PrimaryBuckets = getPrimaryBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
-    Set<Integer> vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1PrimaryBuckets = getPrimaryBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
-    Set<Integer> vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
+    Set<Integer> vm2PrimaryBuckets = getPrimaryBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
 
     closeCache(vm0);
@@ -1377,19 +1397,19 @@ public class PersistentColocatedPartitionedRegionDUnitTest
 
     Wait.pause(4000);
 
-    assertEquals(vm0Buckets, getBucketList(vm0, PR_REGION_NAME));
+    assertEquals(vm0Buckets, getBucketList(vm0, getPartitionedRegionName()));
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    assertEquals(vm1Buckets, getBucketList(vm1, PR_REGION_NAME));
+    assertEquals(vm1Buckets, getBucketList(vm1, getPartitionedRegionName()));
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
-    assertEquals(vm2Buckets, getBucketList(vm2, PR_REGION_NAME));
+    assertEquals(vm2Buckets, getBucketList(vm2, getPartitionedRegionName()));
     assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
 
     // primary can differ
-    vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
+    vm0PrimaryBuckets = getPrimaryBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
-    vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
+    vm1PrimaryBuckets = getPrimaryBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
-    vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
+    vm2PrimaryBuckets = getPrimaryBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
 
 
@@ -1429,7 +1449,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         AttributesFactory af = new AttributesFactory();
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setRedundantCopies(1);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
 
@@ -1463,7 +1483,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
     SerializableRunnable createChildPR = getCreateChildPRRunnable();
@@ -1478,18 +1498,18 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-    Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0Buckets = getBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
-    Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1Buckets = getBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
-    Set<Integer> vm2Buckets = getBucketList(vm2, PR_REGION_NAME);
+    Set<Integer> vm2Buckets = getBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
 
-    Set<Integer> vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
+    Set<Integer> vm0PrimaryBuckets = getPrimaryBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
-    Set<Integer> vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
+    Set<Integer> vm1PrimaryBuckets = getPrimaryBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
-    Set<Integer> vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
+    Set<Integer> vm2PrimaryBuckets = getPrimaryBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
 
     closeCache(vm2);
@@ -1527,18 +1547,18 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     vm2.invoke(createParentPR);
     // Make sure vm2 hasn't created any buckets in the parent PR yet
     // We don't want any buckets until the child PR is created
-    assertEquals(Collections.emptySet(), getBucketList(vm2, PR_REGION_NAME));
+    assertEquals(Collections.emptySet(), getBucketList(vm2, getPartitionedRegionName()));
     vm2.invoke(createChildPR);
 
     // Now vm2 should have created all of the appropriate buckets.
-    assertEquals(vm2Buckets, getBucketList(vm2, PR_REGION_NAME));
+    assertEquals(vm2Buckets, getBucketList(vm2, getPartitionedRegionName()));
     assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
 
-    vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
+    vm0PrimaryBuckets = getPrimaryBucketList(vm0, getPartitionedRegionName());
     assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
-    vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
+    vm1PrimaryBuckets = getPrimaryBucketList(vm1, getPartitionedRegionName());
     assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
-    vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
+    vm2PrimaryBuckets = getPrimaryBucketList(vm2, getPartitionedRegionName());
     assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
   }
 
@@ -1569,9 +1589,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
 
@@ -1621,14 +1641,14 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
         DiskStore ds2 = cache.findDiskStore("disk2");
         if (ds2 == null) {
           ds2 = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk2");
         }
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         af.setDiskStoreName("disk2");
         cache.createRegion("region2", af.create());
@@ -1676,7 +1696,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     closeCache(vm2);
 
     // Wait until redundancy is recovered.
-    waitForRedundancyRecovery(vm0, 1, PR_REGION_NAME);
+    waitForRedundancyRecovery(vm0, 1, getPartitionedRegionName());
     waitForRedundancyRecovery(vm0, 1, "region2");
 
     createData(vm0, 0, NUM_BUCKETS, "b");
@@ -1719,11 +1739,11 @@ public class PersistentColocatedPartitionedRegionDUnitTest
       checkData(vm0, 0, NUM_BUCKETS, "b");
       checkData(vm0, 0, NUM_BUCKETS, "b", "region2");
 
-      waitForRedundancyRecovery(vm0, 1, PR_REGION_NAME);
+      waitForRedundancyRecovery(vm0, 1, getPartitionedRegionName());
       waitForRedundancyRecovery(vm0, 1, "region2");
-      waitForRedundancyRecovery(vm1, 1, PR_REGION_NAME);
+      waitForRedundancyRecovery(vm1, 1, getPartitionedRegionName());
       waitForRedundancyRecovery(vm1, 1, "region2");
-      waitForRedundancyRecovery(vm2, 1, PR_REGION_NAME);
+      waitForRedundancyRecovery(vm2, 1, getPartitionedRegionName());
       waitForRedundancyRecovery(vm2, 1, "region2");
 
       // Make sure we don't have any extra buckets after the restart
@@ -1760,7 +1780,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
 
@@ -1783,7 +1803,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setRedundantCopies(1);
         paf.setRecoveryDelay(0);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
         af.setPartitionAttributes(paf.create());
@@ -1820,7 +1840,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
 
@@ -1848,7 +1868,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setRedundantCopies(1);
         paf.setRecoveryDelay(0);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk2");
         af.setPartitionAttributes(paf.create());
@@ -1907,7 +1927,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     closeCache(vm2);
 
     // Wait until redundancy is recovered.
-    waitForRedundancyRecovery(vm0, 1, PR_REGION_NAME);
+    waitForRedundancyRecovery(vm0, 1, getPartitionedRegionName());
     waitForRedundancyRecovery(vm0, 1, "region2");
 
     createData(vm0, 0, NUM_BUCKETS, "b");
@@ -1963,7 +1983,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     // Make sure we can actually use the buckets in the child region.
     createData(vm0, 0, NUM_BUCKETS, "c", "region2");
 
-    waitForRedundancyRecovery(vm0, 1, PR_REGION_NAME);
+    waitForRedundancyRecovery(vm0, 1, getPartitionedRegionName());
     waitForRedundancyRecovery(vm0, 1, "region2");
 
     // Make sure we don't have any extra buckets after the restart
@@ -2012,9 +2032,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
       }
@@ -2143,9 +2163,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         paf.setLocalMaxMemory(0);
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PARTITION);
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
       }
@@ -2166,9 +2186,9 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setPartitionAttributes(paf.create());
         cache.createRegion("region2", af.create());
       }
@@ -2186,7 +2206,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
     SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {
       public void run() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
         try {
           for (int i = 0;; i++) {
             try {
@@ -2278,7 +2298,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
 
@@ -2290,7 +2310,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setRedundantCopies(0);
         paf.setRecoveryDelay(0);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
         af.setPartitionAttributes(paf.create());
@@ -2331,11 +2351,11 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
         paf.setRedundantCopies(1);
         paf.setRecoveryDelay(-1);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
         af.setPartitionAttributes(paf.create());
@@ -2449,7 +2469,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
       }
     };
 
@@ -2466,7 +2486,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setRedundantCopies(0);
         paf.setRecoveryDelay(0);
-        paf.setColocatedWith(PR_REGION_NAME);
+        paf.setColocatedWith(getPartitionedRegionName());
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk2");
         af.setPartitionAttributes(paf.create());
@@ -2549,7 +2569,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           af.setPartitionAttributes(paf.create());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
-          cache.createRegion(PR_REGION_NAME, af.create());
+          cache.createRegion(getPartitionedRegionName(), af.create());
         } finally {
           System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
               String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
@@ -2567,7 +2587,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           PartitionAttributesFactory paf = new PartitionAttributesFactory();
           paf.setRedundantCopies(0);
           paf.setRecoveryDelay(0);
-          paf.setColocatedWith(PR_REGION_NAME);
+          paf.setColocatedWith(getPartitionedRegionName());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
           af.setPartitionAttributes(paf.create());
@@ -2614,7 +2634,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           af.setPartitionAttributes(paf.create());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
-          cache.createRegion(PR_REGION_NAME, af.create());
+          cache.createRegion(getPartitionedRegionName(), af.create());
         } finally {
           System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
               String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
@@ -2633,7 +2653,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           PartitionAttributesFactory paf = new PartitionAttributesFactory();
           paf.setRedundantCopies(0);
           paf.setRecoveryDelay(0);
-          paf.setColocatedWith(PR_REGION_NAME);
+          paf.setColocatedWith(getPartitionedRegionName());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
           af.setPartitionAttributes(paf.create());
@@ -2679,7 +2699,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           af.setPartitionAttributes(paf.create());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
-          cache.createRegion(PR_REGION_NAME, af.create());
+          cache.createRegion(getPartitionedRegionName(), af.create());
         } finally {
           System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
               String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
@@ -2697,7 +2717,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
           PartitionAttributesFactory paf = new PartitionAttributesFactory();
           paf.setRedundantCopies(0);
           paf.setRecoveryDelay(0);
-          paf.setColocatedWith(PR_REGION_NAME);
+          paf.setColocatedWith(getPartitionedRegionName());
           af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
           af.setDiskStoreName("disk");
           af.setPartitionAttributes(paf.create());
@@ -2856,7 +2876,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
       @Override
       public void run() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         for (int i = 0; i < NUM_BUCKETS; i++) {
           assertEquals("For key " + i, "a", region.get(i));
@@ -2937,7 +2957,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest
       @Override
       public void run() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         for (int i = 0; i < NUM_BUCKETS; i++) {
           assertEquals("For key " + i, "a", region.get(i));
@@ -2949,8 +2969,8 @@ public class PersistentColocatedPartitionedRegionDUnitTest
 
       public void run() {
         Cache cache = getCache();
-        LogWriterUtils.getLogWriter().info("creating data in " + PR_REGION_NAME);
-        Region region = cache.getRegion(PR_REGION_NAME);
+        LogWriterUtils.getLogWriter().info("creating data in " + getPartitionedRegionName());
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         for (int i = 0; i < NUM_BUCKETS; i++) {
           region.put(i, "c");
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
index 014eea8..69629a9 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
@@ -32,7 +32,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.awaitility.Awaitility;
 import org.junit.Ignore;
@@ -77,6 +76,7 @@ import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.internal.AvailablePort;
+import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.InitialImageOperation.RequestImageMessage;
 import org.apache.geode.internal.cache.PartitionedRegion;
@@ -169,7 +169,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     } catch (RMIException exp) {
       assertTrue(exp.getCause() instanceof IllegalStateException);
       IllegalStateException ise = (IllegalStateException) exp.getCause();
-      Object[] prms = new Object[] {"/" + PR_REGION_NAME, 2, 5};
+      Object[] prms = new Object[] {"/" + getPartitionedRegionName(), 2, 5};
       assertTrue(ise.getMessage().contains(
           LocalizedStrings.PartitionedRegion_FOR_REGION_0_TotalBucketNum_1_SHOULD_NOT_BE_CHANGED_Previous_Configured_2
               .toString(prms)));
@@ -181,7 +181,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     } catch (RMIException exp) {
       assertTrue(exp.getCause() instanceof IllegalStateException);
       IllegalStateException ise = (IllegalStateException) exp.getCause();
-      Object[] prms = new Object[] {"/" + PR_REGION_NAME, 10, 5};
+      Object[] prms = new Object[] {"/" + getPartitionedRegionName(), 10, 5};
       assertTrue(ise.getMessage().contains(
           LocalizedStrings.PartitionedRegion_FOR_REGION_0_TotalBucketNum_1_SHOULD_NOT_BE_CHANGED_Previous_Configured_2
               .toString(prms)));
@@ -213,7 +213,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
         af.setDiskStoreName("disk");
         RegionAttributes attr = af.create();
-        cache.createRegion(PR_REGION_NAME, attr);
+        cache.createRegion(getPartitionedRegionName(), attr);
       }
     };
 
@@ -292,7 +292,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       vm0.invoke(new SerializableRunnable() {
         public void run() {
           Cache cache = getCache();
-          Region region = cache.getRegion(PR_REGION_NAME);
+          Region region = cache.getRegion(getPartitionedRegionName());
           try {
             for (int i = 0; i < numBuckets; i++) {
               region.put(i, new BadSerializer());
@@ -328,6 +328,24 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     }
   }
 
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
+      }
+    });
+  }
+
   public static class BadSerializer implements DataSerializable {
 
     public BadSerializer() {
@@ -610,7 +628,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     vm0.invoke(new SerializableRunnable("Test ways to read") {
       public void run() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         try {
           FunctionService.onRegion(region).execute(new TestFunction());
@@ -652,7 +670,8 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         }
 
         try {
-          cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
+          cache.getQueryService().newQuery("select * from /" + getPartitionedRegionName())
+              .execute();
           fail("Should not have been able to read from missing buckets!");
         } catch (PartitionOfflineException e) {
           // expected
@@ -1044,7 +1063,9 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
   /**
    * In this test, we replace buckets that are offline on A by creating them on C. We then
-   * shutdown C and restart A, which recovers those buckets
+   * shut down C and restart A, which recovers those buckets.<p>
+   *
+   * TRAC 41340: data inconsistency after disk recovery from persistent PR
    */
   @Test
   public void testBug41340() throws Throwable {
@@ -1159,7 +1180,9 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
   /**
    * Test that we don't record our old member ID as offline, preventing redundancy recovery in the
-   * future.
+   * future.<p>
+   *
+   * TRAC 41341: Redundancy not restored after reinitializing after locally destroying persistent PR
    */
   @Test
   public void testBug41341() {
@@ -1405,7 +1428,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         paf.setLocalMaxMemory(0);
         af.setPartitionAttributes(paf.create());
         af.setDataPolicy(DataPolicy.PARTITION);
-        cache.createRegion(PR_REGION_NAME, af.create());
+        cache.createRegion(getPartitionedRegionName(), af.create());
 
         CacheServer server = cache.addCacheServer();
         server.setPort(AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET));
@@ -1438,7 +1461,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
           af.setDataPolicy(DataPolicy.NORMAL);
           af.setScope(Scope.LOCAL);
           af.setPoolName("pool");
-          Region region = cache.createRegion(PR_REGION_NAME, af.create());
+          Region region = cache.createRegion(getPartitionedRegionName(), af.create());
           try {
             region.registerInterestRegex(".*");
           } catch (ServerOperationException e) {
@@ -1468,7 +1491,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         EvictionAttributes.createLRUEntryAttributes(50, EvictionAction.OVERFLOW_TO_DISK));
     rf.setDiskDirs(getDiskDirs());
 
-    Region region = rf.create(PR_REGION_NAME);
+    Region region = rf.create(getPartitionedRegionName());
     region.get(0);
     cache.getDistributedSystem().disconnect();
     // cache.close();
@@ -1490,8 +1513,8 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
 
           @Override
-          public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {
-            if (msg instanceof ManageBucketReplyMessage) {
+          public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
+            if (message instanceof ManageBucketReplyMessage) {
               Cache cache = getCache();
               disconnectFromDS();
 
@@ -1554,18 +1577,18 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     createNestedPR(vm1);
     createNestedPR(vm2);
 
-    createData(vm0, 0, numBuckets, "a", "parent1/" + PR_REGION_NAME);
-    createData(vm0, 0, numBuckets, "b", "parent2/" + PR_REGION_NAME);
-    checkData(vm2, 0, numBuckets, "a", "parent1/" + PR_REGION_NAME);
-    checkData(vm2, 0, numBuckets, "b", "parent2/" + PR_REGION_NAME);
+    createData(vm0, 0, numBuckets, "a", "parent1/" + getPartitionedRegionName());
+    createData(vm0, 0, numBuckets, "b", "parent2/" + getPartitionedRegionName());
+    checkData(vm2, 0, numBuckets, "a", "parent1/" + getPartitionedRegionName());
+    checkData(vm2, 0, numBuckets, "b", "parent2/" + getPartitionedRegionName());
 
-    Set<Integer> vm1_0Buckets = getBucketList(vm0, "parent1/" + PR_REGION_NAME);
-    Set<Integer> vm1_1Buckets = getBucketList(vm1, "parent1/" + PR_REGION_NAME);
-    Set<Integer> vm1_2Buckets = getBucketList(vm2, "parent1/" + PR_REGION_NAME);
+    Set<Integer> vm1_0Buckets = getBucketList(vm0, "parent1/" + getPartitionedRegionName());
+    Set<Integer> vm1_1Buckets = getBucketList(vm1, "parent1/" + getPartitionedRegionName());
+    Set<Integer> vm1_2Buckets = getBucketList(vm2, "parent1/" + getPartitionedRegionName());
 
-    Set<Integer> vm2_0Buckets = getBucketList(vm0, "parent2/" + PR_REGION_NAME);
-    Set<Integer> vm2_1Buckets = getBucketList(vm1, "parent2/" + PR_REGION_NAME);
-    Set<Integer> vm2_2Buckets = getBucketList(vm2, "parent2/" + PR_REGION_NAME);
+    Set<Integer> vm2_0Buckets = getBucketList(vm0, "parent2/" + getPartitionedRegionName());
+    Set<Integer> vm2_1Buckets = getBucketList(vm1, "parent2/" + getPartitionedRegionName());
+    Set<Integer> vm2_2Buckets = getBucketList(vm2, "parent2/" + getPartitionedRegionName());
 
     closeCache(vm0);
     closeCache(vm1);
@@ -1583,20 +1606,20 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     async1.getResult();
     async2.getResult();
 
-    assertEquals(vm1_0Buckets, getBucketList(vm0, "parent1/" + PR_REGION_NAME));
-    assertEquals(vm1_1Buckets, getBucketList(vm1, "parent1/" + PR_REGION_NAME));
-    assertEquals(vm1_2Buckets, getBucketList(vm2, "parent1/" + PR_REGION_NAME));
+    assertEquals(vm1_0Buckets, getBucketList(vm0, "parent1/" + getPartitionedRegionName()));
+    assertEquals(vm1_1Buckets, getBucketList(vm1, "parent1/" + getPartitionedRegionName()));
+    assertEquals(vm1_2Buckets, getBucketList(vm2, "parent1/" + getPartitionedRegionName()));
 
-    assertEquals(vm2_0Buckets, getBucketList(vm0, "parent2/" + PR_REGION_NAME));
-    assertEquals(vm2_1Buckets, getBucketList(vm1, "parent2/" + PR_REGION_NAME));
-    assertEquals(vm2_2Buckets, getBucketList(vm2, "parent2/" + PR_REGION_NAME));
+    assertEquals(vm2_0Buckets, getBucketList(vm0, "parent2/" + getPartitionedRegionName()));
+    assertEquals(vm2_1Buckets, getBucketList(vm1, "parent2/" + getPartitionedRegionName()));
+    assertEquals(vm2_2Buckets, getBucketList(vm2, "parent2/" + getPartitionedRegionName()));
 
-    checkData(vm0, 0, numBuckets, "a", "parent1/" + PR_REGION_NAME);
-    checkData(vm0, 0, numBuckets, "b", "parent2/" + PR_REGION_NAME);
-    createData(vm1, numBuckets, 113, "c", "parent1/" + PR_REGION_NAME);
-    createData(vm1, numBuckets, 113, "d", "parent2/" + PR_REGION_NAME);
-    checkData(vm2, numBuckets, 113, "c", "parent1/" + PR_REGION_NAME);
-    checkData(vm2, numBuckets, 113, "d", "parent2/" + PR_REGION_NAME);
+    checkData(vm0, 0, numBuckets, "a", "parent1/" + getPartitionedRegionName());
+    checkData(vm0, 0, numBuckets, "b", "parent2/" + getPartitionedRegionName());
+    createData(vm1, numBuckets, 113, "c", "parent1/" + getPartitionedRegionName());
+    createData(vm1, numBuckets, 113, "d", "parent2/" + getPartitionedRegionName());
+    checkData(vm2, numBuckets, 113, "c", "parent1/" + getPartitionedRegionName());
+    checkData(vm2, numBuckets, 113, "d", "parent2/" + getPartitionedRegionName());
   }
 
   @Test
@@ -1616,7 +1639,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
       public Object call() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         int i = 0;
         while (true) {
@@ -1636,7 +1659,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     SerializableCallable waitForIntValue = new SerializableCallable() {
       public Object call() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
         // The value is initialized as a String so wait
         // for it to be changed to an Integer.
         await().atMost(60, SECONDS).until(() -> {
@@ -1669,7 +1692,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
       public Object call() {
         Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
         int value = (Integer) region.get(0);
         return value;
       }
@@ -1686,9 +1709,10 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
    * Test for bug 42226. 1. Member A has the bucket. 2. Member B starts creating the bucket; it tells
    * member A that it hosts the bucket. 3. Member A crashes. 4. Member B destroys the bucket and
    * throws a partition offline exception, because it wasn't able to complete initialization. 5.
-   * Member A recovers, and gets stuck waiting for member B.
+   * Member A recovers, and gets stuck waiting for member B.<p>
    *
-   * @throws Throwable
+   * TRAC 42226: recycled VM hangs during re-start while waiting for Partition to come online
+   * (after Controller VM sees unexpected PartitionOffLineException while doing ops)
    */
   @Category(FlakyTest.class) // GEODE-1208: time sensitive, multiple non-thread-safe test hooks,
                              // async actions
@@ -1834,7 +1858,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
           Cache cache = getCache();
           RegionAttributes attr = getPersistentPRAttributes(redundancy, -1, cache, 113, true);
-          cache.createRegion(PR_REGION_NAME, attr);
+          cache.createRegion(getPartitionedRegionName(), attr);
         }
       };
 
@@ -2129,15 +2153,15 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
 
     // create some buckets
     createData(vm0, 0, 2, "a");
-    createData(vm0, 0, 2, "a", PR_CHILD_REGION_NAME);
-    closePR(vm0, PR_CHILD_REGION_NAME);
+    createData(vm0, 0, 2, "a", getChildRegionName());
+    closePR(vm0, getChildRegionName());
     closePR(vm0);
 
     // createPR(vm1, 1);
     createCoLocatedPR(vm1, 1, false);
     // create an overlapping bucket
     createData(vm1, 2, 4, "a");
-    createData(vm1, 2, 4, "a", PR_CHILD_REGION_NAME);
+    createData(vm1, 2, 4, "a", getChildRegionName());
 
     IgnoredException[] expectVm0 =
         {IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0),
@@ -2350,19 +2374,18 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
   private void createChildPR(VM vm) {
     vm.invoke(() -> {
       PartitionAttributes PRatts =
-          new PartitionAttributesFactory().setColocatedWith(PR_REGION_NAME).create();
+          new PartitionAttributesFactory().setColocatedWith(getPartitionedRegionName()).create();
       PartitionedRegion child =
           (PartitionedRegion) PartitionedRegionTestHelper.createPartionedRegion("CHILD", PRatts);
     });
   }
 
-  private static final class RecoveryObserver
-      extends InternalResourceManager.ResourceObserverAdapter {
+  private final class RecoveryObserver extends InternalResourceManager.ResourceObserverAdapter {
     final CountDownLatch recoveryDone = new CountDownLatch(1);
 
     @Override
     public void rebalancingOrRecoveryFinished(Region region) {
-      if (region.getName().equals(PR_REGION_NAME)) {
+      if (region.getName().equals(getPartitionedRegionName())) {
         recoveryDone.countDown();
       }
     }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
index d401b7f..08ea72f 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
@@ -14,26 +14,15 @@
  */
 package org.apache.geode.internal.cache.partitioned;
 
-import static org.apache.geode.test.dunit.Assert.assertEquals;
+import static org.apache.commons.io.FileUtils.listFiles;
+import static org.apache.commons.io.filefilter.DirectoryFileFilter.DIRECTORY;
+import static org.apache.geode.admin.AdminDistributedSystemFactory.defineDistributedSystem;
+import static org.apache.geode.admin.AdminDistributedSystemFactory.getDistributedSystem;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.assertj.core.api.Assertions.assertThat;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.InetAddress;
-import java.util.Collection;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.DirectoryFileFilter;
 import org.apache.commons.io.filefilter.RegexFileFilter;
-
 import org.apache.geode.admin.AdminDistributedSystem;
-import org.apache.geode.admin.AdminDistributedSystemFactory;
 import org.apache.geode.admin.AdminException;
 import org.apache.geode.admin.DistributedSystemConfig;
 import org.apache.geode.cache.AttributesFactory;
@@ -43,361 +32,234 @@ import org.apache.geode.cache.DiskStore;
 import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
-import org.apache.geode.cache.control.RebalanceFactory;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.cache.partition.PartitionRegionInfo;
 import org.apache.geode.cache.persistence.ConflictingPersistentDataException;
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.cache.BackupUtil;
+import org.apache.geode.internal.cache.backup.BackupUtil;
 import org.apache.geode.internal.cache.DiskRegion;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.PartitionedRegionDataStore;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
 import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver;
-import org.apache.geode.internal.cache.persistence.PersistenceAdvisor;
+import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
 import org.apache.geode.internal.cache.persistence.PersistenceAdvisorImpl;
+import org.apache.geode.internal.cache.persistence.PersistenceAdvisorImpl.PersistenceAdvisorObserver;
 import org.apache.geode.internal.cache.persistence.PersistentMemberID;
+import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.management.BackupStatus;
 import org.apache.geode.management.ManagementException;
-import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.Invoke;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.logging.log4j.Logger;
+import org.junit.Before;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.InetAddress;
+import java.util.Collection;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+@SuppressWarnings("serial")
 public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTestCase {
+  private static final Logger logger = LogService.getLogger();
 
-  public static String PR_REGION_NAME = "region";
-  public static String PR_CHILD_REGION_NAME = "childRegion";
   // This must be bigger than the dunit ack-wait-threshold for the revoke
   // tests. The command line is setting the ack-wait-threshold to be
   // 60 seconds.
   private static final int MAX_WAIT = 70 * 1000;
 
-  /*
-   * (non-Javadoc) Set the region name for this test so that multiple subclasses of this test base
-   * do not conflict with one another during parallel dunit runs
-   *
-   * @see dunit.DistributedTestCase#setUp()
-   */
-  @Override
-  public final void postSetUp() throws Exception {
-    disconnectAllFromDS();
-    Invoke.invokeInEveryVM(PersistentPartitionedRegionTestBase.class, "setRegionName",
-        new Object[] {getUniqueName()});
-    setRegionName(getUniqueName());
-    postSetUpPersistentPartitionedRegionTestBase();
-  }
+  private static final int NUM_BUCKETS = 113;
+  private static final String CHILD_REGION_NAME = "childRegion";
 
-  protected void postSetUpPersistentPartitionedRegionTestBase() throws Exception {}
+  private String partitionedRegionName;
 
-  public static void setRegionName(String testName) {
-    PR_REGION_NAME = testName + "Region";
-  }
+  @Before
+  public void setUpPersistentPartitionedRegionTestBase() throws Exception {
+    disconnectAllFromDS();
 
-  protected void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
-    vm.invoke(new SerializableRunnable("check recovered from disk") {
-      public void run() {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
-        if (recoveredLocally) {
-          assertEquals(0, disk.getStats().getRemoteInitializations());
-          assertEquals(1, disk.getStats().getLocalInitializations());
-        } else {
-          assertEquals(1, disk.getStats().getRemoteInitializations());
-          assertEquals(0, disk.getStats().getLocalInitializations());
-        }
-      }
-    });
-  }
+    partitionedRegionName = getUniqueName() + "Region";
+    invokeInEveryVM(() -> partitionedRegionName = getUniqueName() + "Region");
 
-  protected void fakeCleanShutdown(VM vm, final int bucketId) {
-    vm.invoke(new SerializableRunnable("mark clean") {
-      public void run() {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
-        for (PersistentMemberID id : disk.getOnlineMembers()) {
-          disk.memberOfflineAndEqual(id);
-        }
-        for (PersistentMemberID id : disk.getOfflineMembers()) {
-          disk.memberOfflineAndEqual(id);
-        }
-        cache.close();
-      }
-    });
+    postSetUpPersistentPartitionedRegionTestBase();
   }
 
-  private PersistentMemberID getPersistentID(VM vm, final int bucketId) {
-    Object id = vm.invoke(new SerializableCallable("get bucket persistent id") {
-      public Object call() {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        PersistenceAdvisor advisor =
-            region.getRegionAdvisor().getBucket(bucketId).getPersistenceAdvisor();
-        return advisor.getPersistentID();
-      }
-    });
-
-    return (PersistentMemberID) id;
+  protected void postSetUpPersistentPartitionedRegionTestBase() throws Exception {
+    // override as needed
   }
 
-  private void forceRecovery(VM vm) {
-    vm.invoke(new SerializableRunnable("force recovery") {
-      public void run() {
-        Cache cache = getCache();
-        RebalanceFactory rf = cache.getResourceManager().createRebalanceFactory();
-        try {
-          rf.start().getResults();
-        } catch (Exception e) {
-          Assert.fail("interupted", e);
-        }
+  void fakeCleanShutdown(final VM vm, final int bucketId) {
+    vm.invoke("fakeCleanShutdown", () -> {
+      Cache cache = getCache();
+      PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+      DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+      for (PersistentMemberID id : disk.getOnlineMembers()) {
+        disk.memberOfflineAndEqual(id);
       }
+      for (PersistentMemberID id : disk.getOfflineMembers()) {
+        disk.memberOfflineAndEqual(id);
+      }
+      cache.close();
     });
   }
 
-  protected void checkData(VM vm0, final int startKey, final int endKey, final String value) {
-    checkData(vm0, startKey, endKey, value, PR_REGION_NAME);
+  protected void checkData(VM vm, final int startKey, final int endKey, final String value) {
+    checkData(vm, startKey, endKey, value, getPartitionedRegionName());
   }
 
-  protected void checkData(VM vm0, final int startKey, final int endKey, final String value,
+  protected void checkData(final VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
-    SerializableRunnable checkData = new SerializableRunnable("CheckData") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(regionName);
-
-        for (int i = startKey; i < endKey; i++) {
-          assertEquals("For key " + i, value, region.get(i));
-        }
+    vm.invoke("checkData", () -> {
+      Region region = getCache().getRegion(regionName);
+      for (int i = startKey; i < endKey; i++) {
+        assertThat(region.get(i)).isEqualTo(value);
       }
-    };
-
-    vm0.invoke(checkData);
+    });
   }
 
-  protected void removeData(VM vm, final int startKey, final int endKey) {
-    SerializableRunnable createData = new SerializableRunnable() {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
-
-        for (int i = startKey; i < endKey; i++) {
-          region.destroy(i);
-        }
+  void removeData(final VM vm, final int startKey, final int endKey) {
+    vm.invoke("removeData", () -> {
+      Region region = getCache().getRegion(getPartitionedRegionName());
+      for (int i = startKey; i < endKey; i++) {
+        region.destroy(i);
       }
-    };
-    vm.invoke(createData);
+    });
   }
 
-  protected void createData(VM vm, final int startKey, final int endKey, final String value) {
-    LogWriterUtils.getLogWriter().info("createData invoked.  PR_REGION_NAME is " + PR_REGION_NAME);
-    createData(vm, startKey, endKey, value, PR_REGION_NAME);
+  protected void createData(final VM vm, final int startKey, final int endKey, final String value) {
+    createData(vm, startKey, endKey, value, getPartitionedRegionName());
   }
 
-  protected void createData(VM vm, final int startKey, final int endKey, final String value,
+  protected void createData(final VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
-    SerializableRunnable createData = new SerializableRunnable("createData") {
-
-      public void run() {
-        Cache cache = getCache();
-        cache.getLogger().info("creating data in " + regionName);
-        Region region = cache.getRegion(regionName);
-
-        for (int i = startKey; i < endKey; i++) {
-          region.put(i, value);
-        }
+    vm.invoke("createData", () -> {
+      Region region = getCache().getRegion(regionName);
+      for (int i = startKey; i < endKey; i++) {
+        region.put(i, value);
       }
-    };
-    vm.invoke(createData);
+    });
   }
 
-  protected void closeCache(VM vm0) {
-    SerializableRunnable close = new SerializableRunnable("Close Cache") {
-      public void run() {
-        Cache cache = getCache();
-        cache.close();
-      }
-    };
-
-    vm0.invoke(close);
+  protected void closeCache(final VM vm) {
+    vm.invoke("closeCache", () -> getCache().close());
   }
 
-  protected AsyncInvocation closeCacheAsync(VM vm0) {
-    SerializableRunnable close = new SerializableRunnable() {
-      public void run() {
-        Cache cache = getCache();
-        cache.close();
-      }
-    };
-
-    return vm0.invokeAsync(close);
+  AsyncInvocation closeCacheAsync(final VM vm) {
+    return vm.invokeAsync("closeCacheAsync", () -> getCache().close());
   }
 
-  protected void closePR(VM vm0) {
-    closePR(vm0, PR_REGION_NAME);
+  void closePR(final VM vm) {
+    closePR(vm, getPartitionedRegionName());
   }
 
-  protected void closePR(VM vm0, String regionName) {
-    SerializableRunnable close = new SerializableRunnable("Close PR") {
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(regionName);
-        region.close();
-      }
-    };
-
-    vm0.invoke(close);
+  void closePR(final VM vm, final String regionName) {
+    vm.invoke("closePR", () -> getCache().getRegion(regionName).close());
   }
 
-  protected void destroyPR(VM vm0) {
-    destroyPR(vm0, PR_REGION_NAME);
+  void destroyPR(final VM vm) {
+    destroyPR(vm, getPartitionedRegionName());
   }
 
-  protected void destroyPR(VM vm0, String regionName) {
-    SerializableRunnable destroy = new SerializableRunnable("Destroy PR") {
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(regionName);
-        region.localDestroyRegion();
-      }
-    };
-
-    vm0.invoke(destroy);
+  private void destroyPR(final VM vm, String regionName) {
+    vm.invoke("destroyPR", () -> getCache().getRegion(regionName).localDestroyRegion());
   }
 
-  protected void localDestroyPR(VM vm0) {
-    SerializableRunnable destroyPR = new SerializableRunnable("destroy pr") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
-        region.localDestroyRegion();
-      }
-    };
-    vm0.invoke(destroyPR);
+  void localDestroyPR(final VM vm) {
+    vm.invoke("localDestroyPR", () -> getCache().getRegion(getPartitionedRegionName()).localDestroyRegion());
   }
 
-  protected void createPR(VM vm0, final int redundancy, final int recoveryDelay, int numBuckets) {
-    SerializableRunnable createPR = getCreatePRRunnable(redundancy, recoveryDelay, numBuckets);
-
-    vm0.invoke(createPR);
+  protected void createPR(final VM vm, final int redundancy, final int recoveryDelay, final int numBuckets) {
+    vm.invoke(getCreatePRRunnable(redundancy, recoveryDelay, numBuckets));
   }
 
-  protected void createPR(VM vm0, final int redundancy, final int recoveryDelay, int numBuckets,
-      boolean synchronous) {
-    SerializableRunnable createPR =
-        getCreatePRRunnable(redundancy, recoveryDelay, numBuckets, synchronous);
-
-    vm0.invoke(createPR);
+  protected void createPR(final VM vm, final int redundancy, final int recoveryDelay, final int numBuckets,
+      final boolean synchronous) {
+    vm.invoke(getCreatePRRunnable(redundancy, recoveryDelay, numBuckets, synchronous));
   }
 
-  protected void createPR(VM vm0, final int redundancy, final int recoveryDelay) {
-    SerializableRunnable createPR = getCreatePRRunnable(redundancy, recoveryDelay);
-
-    vm0.invoke(createPR);
+  protected void createPR(final VM vm, final int redundancy, final int recoveryDelay) {
+    vm.invoke(getCreatePRRunnable(redundancy, recoveryDelay));
   }
 
-  protected void createPR(VM vm0, final int redundancy) {
-    SerializableRunnable createPR = getCreatePRRunnable(redundancy, -1);
-
-    vm0.invoke(createPR);
+  protected void createPR(final VM vm, final int redundancy) {
+    vm.invoke(getCreatePRRunnable(redundancy, -1));
   }
 
-  protected void createNestedPR(VM vm) {
-    SerializableRunnable createPR = getNestedPRRunnable();
-    vm.invoke(createPR);
+  void createNestedPR(final VM vm) {
+    vm.invoke(getNestedPRRunnable());
   }
 
-  protected AsyncInvocation createNestedPRAsync(VM vm) {
-    SerializableRunnable createPR = getNestedPRRunnable();
-    return vm.invokeAsync(createPR);
+  AsyncInvocation createNestedPRAsync(final VM vm) {
+    return vm.invokeAsync(getNestedPRRunnable());
   }
 
   private SerializableRunnable getNestedPRRunnable() {
-    SerializableRunnable createPR = new SerializableRunnable("create pr") {
-
+    return new SerializableRunnable("getNestedPRRunnable") {
+      @Override
       public void run() {
-        Cache cache = getCache();
 
         // Wait for both nested PRs to be created
         final CountDownLatch recoveryDone = new CountDownLatch(2);
 
-        ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
+        ResourceObserver observer = new ResourceObserverAdapter() {
           @Override
-          public void recoveryFinished(Region region) {
+          public void recoveryFinished(final Region region) {
             recoveryDone.countDown();
           }
         };
         InternalResourceManager.setResourceObserver(observer);
 
-        DiskStore ds = cache.findDiskStore("disk");
-        if (ds == null) {
-          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
-        }
-        Region parent1;
-        {
-          AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.REPLICATE);
-          parent1 = cache.createRegion("parent1", af.create());
-        }
-        Region parent2;
-        {
-          AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.REPLICATE);
-          parent2 = cache.createRegion("parent2", af.create());
-        }
-        {
-          AttributesFactory af = new AttributesFactory();
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          paf.setRedundantCopies(1);
-          af.setPartitionAttributes(paf.create());
-          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-          af.setDiskStoreName("disk");
-          parent1.createSubregion(PR_REGION_NAME, af.create());
-        }
-        {
-          AttributesFactory af = new AttributesFactory();
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          paf.setRedundantCopies(1);
-          af.setPartitionAttributes(paf.create());
-          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-          af.setDiskStoreName("disk");
-          parent2.createSubregion(PR_REGION_NAME, af.create());
+        Cache cache = getCache();
+        DiskStore diskStore = cache.findDiskStore("disk");
+        if (diskStore == null) {
+          diskStore = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
         }
 
+        AttributesFactory attributesFactory = new AttributesFactory();
+        attributesFactory.setDataPolicy(DataPolicy.REPLICATE);
+
+        Region parent1 = cache.createRegion("parent1", attributesFactory.create());
+        Region parent2 = cache.createRegion("parent2", attributesFactory.create());
+
+        attributesFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+        attributesFactory.setDiskStoreName("disk");
+
+        PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+        partitionAttributesFactory.setRedundantCopies(1);
+        attributesFactory.setPartitionAttributes(partitionAttributesFactory.create());
+
+        parent1.createSubregion(getPartitionedRegionName(), attributesFactory.create());
+        parent2.createSubregion(getPartitionedRegionName(), attributesFactory.create());
+
         try {
           recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
-          Assert.fail("interrupted", e);
+          throw new RuntimeException(e);
         }
       }
     };
-    return createPR;
   }
 
-  protected void createCoLocatedPR(VM vm, int setRedundantCopies,
-      boolean setPersistenceAdvisorObserver) {
+  void createCoLocatedPR(final VM vm, final int setRedundantCopies, final boolean setPersistenceAdvisorObserver) {
     vm.invoke(() -> {
       String dsName = "colacatedpr";
 
-      Cache cache = getCache();
-
       // Wait for both nested PRs to be created
       final CountDownLatch recoveryDone = new CountDownLatch(2);
-      ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
+      ResourceObserver observer = new ResourceObserverAdapter() {
         @Override
-        public void recoveryFinished(Region region) {
+        public void recoveryFinished(final Region region) {
           recoveryDone.countDown();
         }
       };
@@ -407,47 +269,53 @@ public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTes
       // And throw exception while region is getting initialized.
       final CountDownLatch childRegionCreated = new CountDownLatch(1);
       if (setPersistenceAdvisorObserver) {
-        PersistenceAdvisorImpl
-            .setPersistenceAdvisorObserver(new PersistenceAdvisorImpl.PersistenceAdvisorObserver() {
-              public void observe(String regionPath) {
-                if (regionPath.contains(PR_CHILD_REGION_NAME)) {
-                  try {
-                    childRegionCreated.await(MAX_WAIT, TimeUnit.MILLISECONDS);
-                  } catch (Exception e) {
-                    Assert.fail("Exception", e);
-                  }
-                  throw new ConflictingPersistentDataException(
-                      "Testing Cache Close with ConflictingPersistentDataException for region."
-                          + regionPath);
-                }
+        PersistenceAdvisorImpl.setPersistenceAdvisorObserver(new PersistenceAdvisorObserver() {
+          @Override
+          public void observe(String regionPath) {
+            if (regionPath.contains(getChildRegionName())) {
+              try {
+                childRegionCreated.await(MAX_WAIT, TimeUnit.MILLISECONDS);
+              } catch (InterruptedException e) {
+                throw new RuntimeException(e);
               }
-            });
+              throw new ConflictingPersistentDataException(
+                  "Testing Cache Close with ConflictingPersistentDataException for region "
+                      + regionPath);
+            }
+          }
+        });
       }
 
       // Create region.
       try {
-        DiskStore ds = cache.findDiskStore(dsName);
-        if (ds == null) {
-          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(dsName);
+        Cache cache = getCache();
+
+        DiskStore diskStore = cache.findDiskStore(dsName);
+        if (diskStore == null) {
+          diskStore = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(dsName);
         }
 
         // Parent Region
-        PartitionAttributesFactory paf =
+        PartitionAttributesFactory partitionAttributesFactory =
             new PartitionAttributesFactory().setRedundantCopies(setRedundantCopies);
-        AttributesFactory af = new AttributesFactory();
-        af.setPartitionAttributes(paf.create());
-        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-        af.setDiskStoreName(dsName);
-        cache.createRegion(PR_REGION_NAME, af.create());
+
+        AttributesFactory attributesFactory = new AttributesFactory();
+        attributesFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+        attributesFactory.setDiskStoreName(dsName);
+        attributesFactory.setPartitionAttributes(partitionAttributesFactory.create());
+
+        cache.createRegion(getPartitionedRegionName(), attributesFactory.create());
 
         // Colocated region
-        paf = (new PartitionAttributesFactory()).setRedundantCopies(setRedundantCopies)
-            .setColocatedWith(PR_REGION_NAME);
-        af = new AttributesFactory();
-        af.setPartitionAttributes(paf.create());
-        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-        af.setDiskStoreName(dsName);
-        cache.createRegion(PR_CHILD_REGION_NAME, af.create());
+        partitionAttributesFactory = (new PartitionAttributesFactory()).setRedundantCopies(setRedundantCopies)
+            .setColocatedWith(getPartitionedRegionName());
+
+        attributesFactory = new AttributesFactory();
+        attributesFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+        attributesFactory.setDiskStoreName(dsName);
+        attributesFactory.setPartitionAttributes(partitionAttributesFactory.create());
+
+        cache.createRegion(getChildRegionName(), attributesFactory.create());
 
         // Count down on region create.
         childRegionCreated.countDown();
@@ -455,7 +323,7 @@ public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTes
         try {
           recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
-          Assert.fail("interrupted", e);
+          throw new RuntimeException(e);
         }
 
       } finally {
@@ -465,7 +333,7 @@ public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTes
   }
 
   private SerializableRunnable getCreatePRRunnable(final int redundancy, final int recoveryDelay) {
-    return getCreatePRRunnable(redundancy, recoveryDelay, 113);
+    return getCreatePRRunnable(redundancy, recoveryDelay, NUM_BUCKETS);
   }
 
   private SerializableRunnable getCreatePRRunnable(final int redundancy, final int recoveryDelay,
@@ -475,14 +343,12 @@ public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTes
 
   private SerializableRunnable getCreatePRRunnable(final int redundancy, final int recoveryDelay,
       final int numBuckets, final boolean synchronous) {
-    SerializableRunnable createPR = new SerializableRunnable("create pr") {
-
+    return new SerializableRunnable("getCreatePRRunnable") {
+      @Override
       public void run() {
-        final CountDownLatch recoveryDone;
+        final CountDownLatch recoveryDone = new CountDownLatch(1);
         if (redundancy > 0) {
-          recoveryDone = new CountDownLatch(1);
-
-          ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
+          ResourceObserver observer = new ResourceObserverAdapter() {
             @Override
             public void recoveryFinished(Region region) {
               recoveryDone.countDown();
@@ -490,391 +356,290 @@ public abstract class PersistentPartitionedRegionTestBase extends JUnit4CacheTes
           };
           InternalResourceManager.setResourceObserver(observer);
         } else {
-          recoveryDone = null;
+          recoveryDone.countDown();
         }
 
         Cache cache = getCache();
 
-        RegionAttributes attr =
+        RegionAttributes regionAttributes =
             getPersistentPRAttributes(redundancy, recoveryDelay, cache, numBuckets, synchronous);
-        cache.createRegion(PR_REGION_NAME, attr);
-        if (recoveryDone != null) {
-          try {
-            recoveryDone.await();
-          } catch (InterruptedException e) {
-            Assert.fail("Interrupted", e);
-          }
+        cache.createRegion(getPartitionedRegionName(), regionAttributes);
+
+        try {
+          recoveryDone.await();
+        } catch (InterruptedException e) {
+          throw new RuntimeException(e);
         }
       }
     };
-    return createPR;
   }
 
   protected RegionAttributes getPersistentPRAttributes(final int redundancy,
-      final int recoveryDelay, Cache cache, int numBuckets, boolean synchronous) {
-    DiskStore ds = cache.findDiskStore("disk");
-    if (ds == null) {
-      ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+      final int recoveryDelay, final Cache cache, final int numBuckets, final boolean synchronous) {
+    DiskStore diskStore = cache.findDiskStore("disk");
+    if (diskStore == null) {
+      diskStore = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
     }
-    AttributesFactory af = new AttributesFactory();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setRedundantCopies(redundancy);
-    paf.setRecoveryDelay(recoveryDelay);
-    paf.setTotalNumBuckets(numBuckets);
+
+    PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+    partitionAttributesFactory.setRedundantCopies(redundancy);
+    partitionAttributesFactory.setRecoveryDelay(recoveryDelay);
+    partitionAttributesFactory.setTotalNumBuckets(numBuckets);
     // Make sure all vms end up with the same local max memory
-    paf.setLocalMaxMemory(500);
-    af.setPartitionAttributes(paf.create());
-    af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-    af.setDiskStoreName("disk");
-    af.setDiskSynchronous(synchronous);
-    RegionAttributes attr = af.create();
-    return attr;
-  }
+    partitionAttributesFactory.setLocalMaxMemory(500);
 
-  protected AsyncInvocation createPRAsync(VM vm0, final int redundancy, int recoveryDelay,
-      int numBuckets) {
-    SerializableRunnable createPR = getCreatePRRunnable(redundancy, recoveryDelay, numBuckets);
-    return vm0.invokeAsync(createPR);
-  }
+    AttributesFactory attributesFactory = new AttributesFactory();
+    attributesFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+    attributesFactory.setDiskStoreName("disk");
+    attributesFactory.setDiskSynchronous(synchronous);
+    attributesFactory.setPartitionAttributes(partitionAttributesFactory.create());
 
-  protected AsyncInvocation createPRAsync(VM vm0, final int redundancy) {
-    SerializableRunnable createPR = getCreatePRRunnable(redundancy, -1);
-    return vm0.invokeAsync(createPR);
+    return attributesFactory.create();
   }
 
-  protected Set<Integer> getBucketList(VM vm0) {
-    return getBucketList(vm0, PR_REGION_NAME);
+  AsyncInvocation createPRAsync(final VM vm, final int redundancy, int recoveryDelay, int numBuckets) {
+    return vm.invokeAsync(getCreatePRRunnable(redundancy, recoveryDelay, numBuckets));
   }
 
-  protected Set<Integer> getBucketList(VM vm0, final String regionName) {
-    SerializableCallable getBuckets = new SerializableCallable("get buckets") {
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        return new TreeSet<Integer>(region.getDataStore().getAllLocalBucketIds());
-      }
-    };
-
-    return (Set<Integer>) vm0.invoke(getBuckets);
+  AsyncInvocation createPRAsync(final VM vm, final int redundancy) {
+    return vm.invokeAsync(getCreatePRRunnable(redundancy, -1));
   }
 
-  protected void waitForBuckets(VM vm, final Set<Integer> expectedBuckets,
-      final String regionName) {
-    SerializableCallable getBuckets = new SerializableCallable("get buckets") {
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        Wait.waitForCriterion(new WaitCriterion() {
-
-          public boolean done() {
-            return expectedBuckets.equals(getActualBuckets());
-          }
-
-          public String description() {
-            return "Buckets on vm " + getActualBuckets() + " never became equal to expected "
-                + expectedBuckets;
-          }
+  protected Set<Integer> getBucketList(final VM vm) {
+    return getBucketList(vm, getPartitionedRegionName());
+  }
 
-          public TreeSet<Integer> getActualBuckets() {
-            return new TreeSet<Integer>(region.getDataStore().getAllLocalBucketIds());
-          }
-        }, 30 * 1000, 100, true);
+  protected Set<Integer> getBucketList(final VM vm, final String regionName) {
+    return vm.invoke("getBucketList", () -> {
+      PartitionedRegion region = (PartitionedRegion) getCache().getRegion(regionName);
+      return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
+    });
+  }
 
-        return null;
-      }
-    };
+  void waitForBuckets(final VM vm, final Set<Integer> expectedBuckets, final String regionName) {
+    vm.invoke("waitForBuckets", () -> {
+      Cache cache = getCache();
+      final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
 
-    vm.invoke(getBuckets);
+      Wait.waitForCriterion(new WaitCriterion() {
+        @Override
+        public boolean done() {
+          return expectedBuckets.equals(getActualBuckets());
+        }
+        @Override
+        public String description() {
+          return "Buckets on vm " + getActualBuckets() + " never became equal to expected "
+              + expectedBuckets;
+        }
+        Set<Integer> getActualBuckets() {
+          return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
+        }
+      }, 30 * 1000, 100, true);
+    });
   }
 
-  protected Set<Integer> getPrimaryBucketList(VM vm0) {
-    return getPrimaryBucketList(vm0, PR_REGION_NAME);
+  Set<Integer> getPrimaryBucketList(final VM vm) {
+    return getPrimaryBucketList(vm, getPartitionedRegionName());
   }
 
-  protected Set<Integer> getPrimaryBucketList(VM vm0, final String regionName) {
-    SerializableCallable getPrimaryBuckets = new SerializableCallable("get primary buckets") {
-
-      public Object call() throws Exception {
+  Set<Integer> getPrimaryBucketList(final VM vm, final String regionName) {
+    return vm.invoke("getPrimaryBucketList", () -> {
         Cache cache = getCache();
         PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        return new TreeSet<Integer>(region.getDataStore().getAllLocalPrimaryBucketIds());
-      }
-    };
-
-    return (Set<Integer>) vm0.invoke(getPrimaryBuckets);
+        return new TreeSet<>(region.getDataStore().getAllLocalPrimaryBucketIds());
+    });
   }
 
+  void revokeKnownMissingMembers(final VM vm, final int numExpectedMissing) {
+    vm.invoke("revokeKnownMissingMembers", () -> {
+      DistributedSystemConfig config = defineDistributedSystem(getSystem(), "");
+      AdminDistributedSystem adminDS = getDistributedSystem(config);
+      adminDS.connect();
+      try {
+        adminDS.waitToBeConnected(MAX_WAIT);
 
-  protected void revokeKnownMissingMembers(VM vm2, final int numExpectedMissing) {
-    vm2.invoke(new SerializableRunnable("Revoke the member") {
-
-      public void run() {
-        final DistributedSystemConfig config;
-        final AdminDistributedSystem adminDS;
-        try {
-          config = AdminDistributedSystemFactory.defineDistributedSystem(getSystem(), "");
-          adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
-          adminDS.connect();
-          adminDS.waitToBeConnected(MAX_WAIT);
-          try {
-            final WaitCriterion wc = new WaitCriterion() {
-
-              public boolean done() {
-                try {
-                  final Set<PersistentID> missingIds = adminDS.getMissingPersistentMembers();
-                  if (missingIds.size() != numExpectedMissing) {
-                    return false;
-                  }
-                  for (PersistentID missingId : missingIds) {
-                    adminDS.revokePersistentMember(missingId.getUUID());
-                  }
-                  return true;
-                } catch (AdminException ae) {
-                  throw new RuntimeException(ae);
-                }
+        final WaitCriterion wc = new WaitCriterion() {
+          @Override
+          public boolean done() {
+            try {
+              Set<PersistentID> missingIds = adminDS.getMissingPersistentMembers();
+              if (missingIds.size() != numExpectedMissing) {
+                return false;
               }
-
-              public String description() {
-                try {
-                  return "expected " + numExpectedMissing
-                      + " missing members for revocation, current: "
-                      + adminDS.getMissingPersistentMembers();
-                } catch (AdminException ae) {
-                  throw new RuntimeException(ae);
-                }
+              for (PersistentID missingId : missingIds) {
+                adminDS.revokePersistentMember(missingId.getUUID());
               }
-            };
-            Wait.waitForCriterion(wc, MAX_WAIT, 500, true);
-          } finally {
-            adminDS.disconnect();
+              return true;
+            } catch (AdminException e) {
+              throw new RuntimeException(e);
+            }
           }
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
+          @Override
+          public String description() {
+            try {
+              return "expected " + numExpectedMissing
+                  + " missing members for revocation, current: "
+                  + adminDS.getMissingPersistentMembers();
+            } catch (AdminException e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+        Wait.waitForCriterion(wc, MAX_WAIT, 500, true);
+
+      } finally {
+        adminDS.disconnect();
       }
     });
   }
 
-  protected void revokeAllMembers(VM vm) {
-    vm.invoke(new SerializableRunnable("Revoke the member") {
+  void revokeAllMembers(final VM vm) {
+    vm.invoke("revokeAllMembers", () -> {
+      InternalCache cache = getCache(); // TODO:KIRK: delete this line
+      DistributedSystemConfig config = defineDistributedSystem(getSystem(), "");
+      AdminDistributedSystem adminDS = getDistributedSystem(config);
+      adminDS.connect();
 
-      public void run() {
-        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-        DistributedSystemConfig config;
-        AdminDistributedSystem adminDS = null;
-        try {
-          config = AdminDistributedSystemFactory.defineDistributedSystem(getSystem(), "");
-          adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
-          adminDS.connect();
-          adminDS.waitToBeConnected(MAX_WAIT);
-          adminDS.revokePersistentMember(InetAddress.getLocalHost(), null);
-        } catch (RuntimeException e) {
-          throw e;
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        } finally {
-          if (adminDS != null) {
-            adminDS.disconnect();
-          }
-        }
+      try {
+        adminDS.waitToBeConnected(MAX_WAIT);
+        adminDS.revokePersistentMember(InetAddress.getLocalHost(), null);
+      } finally {
+        adminDS.disconnect();
       }
     });
   }
 
-  protected void revokeMember(VM vm, final File directory) {
-    vm.invoke(new SerializableRunnable("Revoke the member") {
-
-      public void run() {
-        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-        DistributedSystemConfig config;
-        AdminDistributedSystem adminDS = null;
-        try {
-          config = AdminDistributedSystemFactory.defineDistributedSystem(getSystem(), "");
-          adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
-          adminDS.connect();
-          adminDS.waitToBeConnected(MAX_WAIT);
-          adminDS.revokePersistentMember(InetAddress.getLocalHost(), directory.getCanonicalPath());
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        } finally {
-          if (adminDS != null) {
-            adminDS.disconnect();
-          }
-        }
+  void revokeMember(final VM vm, final File directory) {
+    vm.invoke("revokeMember", () -> {
+      InternalCache cache = getCache(); // TODO:KIRK: delete this line
+      DistributedSystemConfig config = defineDistributedSystem(getSystem(), "");
+      AdminDistributedSystem adminDS = getDistributedSystem(config);
+      adminDS.connect();
+      try {
+        adminDS.waitToBeConnected(MAX_WAIT);
+        adminDS.revokePersistentMember(InetAddress.getLocalHost(), directory.getCanonicalPath());
+      } finally {
+        adminDS.disconnect();
       }
     });
   }
 
-  protected boolean moveBucket(final int bucketId, VM source, VM target) {
-
-    SerializableCallable getId = new SerializableCallable("Get Id") {
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        return cache.getDistributedSystem().getDistributedMember();
-      }
-    };
-
-    final InternalDistributedMember sourceId = (InternalDistributedMember) source.invoke(getId);
-
-    SerializableCallable move = new SerializableCallable("move bucket") {
-
-      public Object call() {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        return region.getDataStore().moveBucket(bucketId, sourceId, false);
-      }
-    };
-
-    return (Boolean) target.invoke(move);
+  protected boolean moveBucket(final int bucketId, final VM source, final VM target) {
+    InternalDistributedMember sourceId = getInternalDistributedMember(source);
 
+    return target.invoke("moveBucket", () -> {
+      PartitionedRegion region = (PartitionedRegion) getCache().getRegion(getPartitionedRegionName());
+      return region.getDataStore().moveBucket(bucketId, sourceId, false);
+    });
   }
 
-  protected Set<PersistentMemberID> getOfflineMembers(final int bucketId, VM vm) {
-
-    SerializableCallable getId = new SerializableCallable("Get Id") {
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        return region.getRegionAdvisor().getProxyBucketArray()[bucketId].getPersistenceAdvisor()
-            .getMembershipView().getOfflineMembers();
-      }
-    };
-
-
-    return (Set<PersistentMemberID>) vm.invoke(getId);
-
-
+  private InternalDistributedMember getInternalDistributedMember(final VM vm) {
+    return (InternalDistributedMember) vm.invoke("getDistributedMember",
+        () -> getCache().getDistributedSystem().getDistributedMember());
   }
 
-  protected Set<PersistentMemberID> getOnlineMembers(final int bucketId, VM vm) {
-
-    SerializableCallable getId = new SerializableCallable("Get Id") {
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(PR_REGION_NAME);
-        return region.getRegionAdvisor().getProxyBucketArray()[bucketId].getPersistenceAdvisor()
-            .getPersistedOnlineOrEqualMembers();
-      }
-    };
-
-
-    return (Set<PersistentMemberID>) vm.invoke(getId);
+  Set<PersistentMemberID> getOfflineMembers(final int bucketId, final VM vm) {
+    return vm.invoke("getOfflineMembers", () -> {
+      PartitionedRegion region = (PartitionedRegion) getCache().getRegion(getPartitionedRegionName());
+      return region.getRegionAdvisor().getProxyBucketArray()[bucketId].getPersistenceAdvisor().getMembershipView().getOfflineMembers();
+    });
   }
 
-  protected void waitForBucketRecovery(final VM vm2, final Set<Integer> lostBuckets) {
-    waitForBucketRecovery(vm2, lostBuckets, PR_REGION_NAME);
+  Set<PersistentMemberID> getOnlineMembers(final int bucketId, final VM vm) {
+    return vm.invoke("getOnlineMembers", () -> {
+      PartitionedRegion region = (PartitionedRegion) getCache().getRegion(getPartitionedRegionName());
+      return region.getRegionAdvisor().getProxyBucketArray()[bucketId].getPersistenceAdvisor().getPersistedOnlineOrEqualMembers();
+    });
   }
 
-  protected void waitForBucketRecovery(final VM vm2, final Set<Integer> lostBuckets,
-      final String regionName) {
-    vm2.invoke(new SerializableRunnable() {
-      public void run() {
-        Cache cache = getCache();
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        final PartitionedRegionDataStore dataStore = region.getDataStore();
-        Wait.waitForCriterion(new WaitCriterion() {
-
-          public boolean done() {
-            Set<Integer> vm2Buckets = dataStore.getAllLocalBucketIds();
-            return lostBuckets.equals(vm2Buckets);
-          }
-
-          public String description() {
-            return "expected to recover " + lostBuckets + " buckets, now have "
-                + dataStore.getAllLocalBucketIds();
-          }
-        }, MAX_WAIT, 100, true);
-      }
-    });
+  void waitForBucketRecovery(final VM vm, final Set<Integer> lostBuckets) {
+    waitForBucketRecovery(vm, lostBuckets, getPartitionedRegionName());
   }
 
-  protected void waitForRedundancyRecovery(VM vm, final int expectedRedundancy,
+  private void waitForBucketRecovery(final VM vm, final Set<Integer> lostBuckets,
       final String regionName) {
-    vm.invoke(new SerializableRunnable() {
-
-      public void run() {
-        Cache cache = getCache();
-        final Region region = cache.getRegion(regionName);
-        Wait.waitForCriterion(new WaitCriterion() {
+    vm.invoke("waitForBucketRecovery", () -> {
+      PartitionedRegion region = (PartitionedRegion) getCache().getRegion(regionName);
+      PartitionedRegionDataStore dataStore = region.getDataStore();
 
-          public boolean done() {
-            PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
-            return info.getActualRedundantCopies() == expectedRedundancy;
-          }
-
-          public String description() {
-            PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
-            return "Did not reach expected redundancy " + expectedRedundancy + " redundancy info = "
-                + info.getActualRedundantCopies();
-          }
-        }, 30 * 1000, 100, true);
-      }
+      Wait.waitForCriterion(new WaitCriterion() {
+        @Override
+        public boolean done() {
+          Set<Integer> vm2Buckets = dataStore.getAllLocalBucketIds();
+          return lostBuckets.equals(vm2Buckets);
+        }
+        @Override
+        public String description() {
+          return "expected to recover " + lostBuckets + " buckets, now have "
+              + dataStore.getAllLocalBucketIds();
+        }
+      }, MAX_WAIT, 100, true);
     });
   }
 
-  protected void invalidateData(VM vm, final int startKey, final int endKey) {
-    SerializableRunnable createData = new SerializableRunnable() {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(PR_REGION_NAME);
+  void waitForRedundancyRecovery(final VM vm, final int expectedRedundancy, final String regionName) {
+    vm.invoke("waitForRedundancyRecovery", () -> {
+      Region region = getCache().getRegion(regionName);
 
-        for (int i = startKey; i < endKey; i++) {
-          region.destroy(i);
-          region.create(i, null);
-          region.invalidate(i);
+      Wait.waitForCriterion(new WaitCriterion() {
+        @Override
+        public boolean done() {
+          PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
+          return info.getActualRedundantCopies() == expectedRedundancy;
         }
-      }
-    };
-    vm.invoke(createData);
+        @Override
+        public String description() {
+          PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
+          return "Did not reach expected redundancy " + expectedRedundancy + " redundancy info = "
+              + info.getActualRedundantCopies();
+        }
+      }, 30 * 1000, 100, true);
+    });
   }
 
-  // used for above test
-  protected BackupStatus backup(VM vm) {
-    return (BackupStatus) vm.invoke(new SerializableCallable("Backup all members") {
-
-      public Object call() {
-        try {
-          return BackupUtil.backupAllMembers(getSystem().getDistributionManager(), getBackupDir(),
-              null);
-        } catch (ManagementException e) {
-          throw new RuntimeException(e);
-        }
+  protected BackupStatus backup(final VM vm) {
+    return vm.invoke("backup", () -> {
+      try {
+        return BackupUtil.backupAllMembers(getSystem().getDistributionManager(), getBackupDir(),
+            null);
+      } catch (ManagementException e) {
+        throw new RuntimeException(e);
       }
     });
   }
 
-  protected void restoreBackup(int expectedNumScripts) throws IOException, InterruptedException {
-    Collection<File> restoreScripts = FileUtils.listFiles(getBackupDir(),
-        new RegexFileFilter(".*restore.*"), DirectoryFileFilter.DIRECTORY);
-    assertEquals("Restore scripts " + restoreScripts, expectedNumScripts, restoreScripts.size());
+  protected void restoreBackup(final int expectedNumScripts) throws IOException, InterruptedException {
+    Collection<File> restoreScripts =
+        listFiles(getBackupDir(), new RegexFileFilter(".*restore.*"), DIRECTORY);
+    assertThat(restoreScripts).hasSize(expectedNumScripts);
     for (File script : restoreScripts) {
       execute(script);
     }
-
   }
 
-  private void execute(File script) throws IOException, InterruptedException {
-    ProcessBuilder pb = new ProcessBuilder(script.getAbsolutePath());
-    pb.redirectErrorStream(true);
-    Process process = pb.start();
+  private void execute(final File script) throws IOException, InterruptedException {
+    ProcessBuilder processBuilder = new ProcessBuilder(script.getAbsolutePath());
+    processBuilder.redirectErrorStream(true);
+    Process process = processBuilder.start();
+
+    try (BufferedReader reader =
+        new BufferedReader(new InputStreamReader(process.getInputStream()))) {
+      String line;
+      while ((line = reader.readLine()) != null) {
+        logger.info("OUTPUT:" + line);
+        // TODO validate output
+      }
+    }
 
-    InputStream is = process.getInputStream();
-    byte[] buffer = new byte[1024];
-    BufferedReader br = new BufferedReader(new InputStreamReader(is));
-    String line;
-    while ((line = br.readLine()) != null) {
-      LogWriterUtils.getLogWriter().fine("OUTPUT:" + line);
-      // TODO validate output
-    } ;
+    assertThat(process.waitFor()).isEqualTo(0);
+  }
 
-    assertEquals(0, process.waitFor());
+  public String getPartitionedRegionName() {
+    return partitionedRegionName;
+  }
 
+  String getChildRegionName() {
+    return CHILD_REGION_NAME;
   }
 
   protected static File getBackupDir() {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
index 5da461b..135bbca 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
@@ -22,7 +22,9 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheTransactionManager;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.TXManagerImpl;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.Host;
@@ -30,8 +32,6 @@ import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
@@ -120,7 +120,7 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest
         Cache cache = getCache();
 
         CacheTransactionManager tx = cache.getCacheTransactionManager();
-        Region region = cache.getRegion(PR_REGION_NAME);
+        Region region = cache.getRegion(getPartitionedRegionName());
 
         for (int i = startKey; i < endKey; i++) {
           tx.begin();
@@ -169,7 +169,7 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest
   }
 
   @Override
-  protected void checkData(VM vm0, final int startKey, final int endKey, final String value,
+  protected void checkData(VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
     SerializableRunnable checkData = new SerializableRunnable() {
 
@@ -187,6 +187,24 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest
       }
     };
 
-    vm0.invoke(checkData);
+    vm.invoke(checkData);
+  }
+
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
+      }
+    });
   }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PartitionedBackupPrepareAndFinishMsgDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PartitionedBackupPrepareAndFinishMsgDUnitTest.java
deleted file mode 100644
index 4b42c21..0000000
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PartitionedBackupPrepareAndFinishMsgDUnitTest.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.persistence;
-
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionShortcut;
-
-public class PartitionedBackupPrepareAndFinishMsgDUnitTest
-    extends BackupPrepareAndFinishMsgDUnitTest {
-  private static final RegionShortcut REGION_TYPE = RegionShortcut.PARTITION_PERSISTENT;
-
-  @Override
-  public Region<Integer, Integer> createRegion() {
-    return createRegion(REGION_TYPE);
-  }
-}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
index df17039..9dcf90b 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
@@ -1213,7 +1213,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
           }
 
           @Override
-          public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {}
+          public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {}
         });
       }
     });
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/ReplicateBackupPrepareAndFinishMsgDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/ReplicateBackupPrepareAndFinishMsgDUnitTest.java
deleted file mode 100644
index 3f0ba7d..0000000
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/persistence/ReplicateBackupPrepareAndFinishMsgDUnitTest.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.persistence;
-
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionShortcut;
-
-public class ReplicateBackupPrepareAndFinishMsgDUnitTest
-    extends BackupPrepareAndFinishMsgDUnitTest {
-  private static final RegionShortcut REGION_TYPE = RegionShortcut.REPLICATE_PERSISTENT;
-
-  @Override
-  public Region<Integer, Integer> createRegion() {
-    return createRegion(REGION_TYPE);
-  }
-}
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeJUnitTest.java
index 2c34ddd..5e0870e 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeJUnitTest.java
@@ -26,12 +26,12 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.InOrder;
 
-import org.apache.geode.admin.internal.BackupDataStoreHelper;
-import org.apache.geode.admin.internal.FinishBackupRequest;
-import org.apache.geode.admin.internal.PrepareBackupRequest;
+import org.apache.geode.internal.cache.backup.BackupDataStoreHelper;
+import org.apache.geode.internal.cache.backup.FinishBackupRequest;
+import org.apache.geode.internal.cache.backup.PrepareBackupRequest;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.locks.DLockService;
-import org.apache.geode.internal.cache.BackupManager;
+import org.apache.geode.internal.cache.backup.BackupManager;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.persistence.PersistentMemberManager;
 import org.apache.geode.test.fake.Fakes;
diff --git a/geode-core/src/test/java/org/apache/geode/pdx/ClientsWithVersioningRetryDUnitTest.java b/geode-core/src/test/java/org/apache/geode/pdx/ClientsWithVersioningRetryDUnitTest.java
index b67cc48..37c7960 100644
--- a/geode-core/src/test/java/org/apache/geode/pdx/ClientsWithVersioningRetryDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/pdx/ClientsWithVersioningRetryDUnitTest.java
@@ -215,8 +215,8 @@ public class ClientsWithVersioningRetryDUnitTest extends JUnit4CacheTestCase {
         DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
 
           @Override
-          public void beforeSendMessage(DistributionManager dm, DistributionMessage msg) {
-            if (msg instanceof DistributedPutAllOperation.PutAllMessage) {
+          public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
+            if (message instanceof DistributedPutAllOperation.PutAllMessage) {
               DistributionMessageObserver.setInstance(null);
               disconnectFromDS(vm1);
             }
diff --git a/geode-core/src/test/resources/org/apache/geode/internal/cache/BackupJUnitTest.cache.xml b/geode-core/src/test/resources/org/apache/geode/internal/cache/backup/BackupIntegrationTest.cache.xml
similarity index 100%
rename from geode-core/src/test/resources/org/apache/geode/internal/cache/BackupJUnitTest.cache.xml
rename to geode-core/src/test/resources/org/apache/geode/internal/cache/backup/BackupIntegrationTest.cache.xml

-- 
To stop receiving notification emails like this one, please contact
['"commits@geode.apache.org" <co...@geode.apache.org>'].