Posted to commits@geode.apache.org by kl...@apache.org on 2019/05/07 16:22:14 UTC

[geode] 02/03: GEODE-6731: Cleanup tests using ResourceUtils

This is an automated email from the ASF dual-hosted git repository.

klund pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 49a3eaad3c492bbe96a58da8c48461f34026c3cf
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Mon May 6 11:52:16 2019 -0700

    GEODE-6731: Cleanup tests using ResourceUtils
---
 .../geode/session/tests/ContainerInstall.java      |   71 +-
 .../GradleBuildWithGeodeCoreAcceptanceTest.java    |    6 +-
 ...tandaloneClientManagementAPIAcceptanceTest.java |    4 +-
 .../internal/web/RestFunctionExecuteDUnitTest.java |   30 +-
 .../web/controllers/RestAPIsWithSSLDUnitTest.java  |   19 +-
 .../geode/AssemblyContentsIntegrationTest.java     |   12 +-
 .../org/apache/geode/BundledJarsJUnitTest.java     |   42 +-
 .../geode/GeodeDependencyJarIntegrationTest.java   |    8 +-
 .../rest/internal/web/RestSecurityWithSSLTest.java |    3 +-
 .../tools/pulse/PulseSecurityWithSSLTest.java      |    4 +-
 .../cli/CreateMappingCommandDUnitTest.java         |  308 ++--
 .../geode/ClusterCommunicationsDUnitTest.java      |  170 +-
 .../CacheServerSSLConnectionDUnitTest.java         |  341 ++--
 .../client/internal/SSLNoClientAuthDUnitTest.java  |  185 +-
 ...artitionedRegionCompactRangeIndexDUnitTest.java |  117 +-
 .../query/partitioned/PRQueryDUnitHelper.java      |  460 +++--
 .../apache/geode/cache30/CacheXml66DUnitTest.java  |  117 +-
 .../cache30/ReconnectWithCacheXMLDUnitTest.java    |   25 +-
 .../apache/geode/distributed/LocatorDUnitTest.java | 1908 ++++++++------------
 .../distributed/LocatorUDPSecurityDUnitTest.java   |   26 +-
 .../PartitionedRegionCacheXMLExampleDUnitTest.java |   10 +-
 .../cache/StartServerWithXmlDUnitTest.java         |   24 +-
 .../internal/membership/MembershipTestHook.java    |   14 +-
 23 files changed, 1697 insertions(+), 2207 deletions(-)
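
The common thread in this cleanup is org.apache.geode.test.util.ResourceUtils: tests that previously located keystores, cache XML files, and other classpath resources by hand now go through createTempFileFromResource, which copies the named resource into a temporary file so the test no longer depends on where that resource sits on disk. A minimal sketch of the pattern as the updated tests use it (the class name below is made up; the resource path is one that appears in the diff; this block is illustrative and not part of the commit):

    import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;

    import java.io.File;

    public class ResourceUtilsUsageSketch {
      // Copy /ssl/trusted.keystore from the test classpath into a temp file
      // and keep a File handle to it, exactly as the updated tests do.
      private static final File KEYSTORE_FILE =
          new File(createTempFileFromResource(ResourceUtilsUsageSketch.class, "/ssl/trusted.keystore")
              .getAbsolutePath());
    }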

diff --git a/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/ContainerInstall.java b/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/ContainerInstall.java
index 09d8e95..c52e1f4 100644
--- a/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/ContainerInstall.java
+++ b/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/ContainerInstall.java
@@ -58,14 +58,15 @@ import org.apache.geode.management.internal.configuration.utils.ZipUtils;
  * Subclasses provide installation of specific containers.
  */
 public abstract class ContainerInstall {
+
   private final IntSupplier portSupplier;
-  public static final Logger logger = LogService.getLogger();
-  public static final String TMP_DIR = createTempDir();
-  public static final String GEODE_BUILD_HOME = System.getenv("GEODE_HOME");
-  public static final String GEODE_BUILD_HOME_LIB = GEODE_BUILD_HOME + "/lib/";
-  public static final String DEFAULT_INSTALL_DIR = TMP_DIR + "/cargo_containers/";
-  public static final String DEFAULT_MODULE_EXTRACTION_DIR = TMP_DIR + "/cargo_modules/";
-  protected static final String DEFAULT_MODULE_LOCATION = GEODE_BUILD_HOME + "/tools/Modules/";
+  static final Logger logger = LogService.getLogger();
+  static final String TMP_DIR = createTempDir();
+  static final String GEODE_BUILD_HOME = System.getenv("GEODE_HOME");
+  static final String GEODE_BUILD_HOME_LIB = GEODE_BUILD_HOME + "/lib/";
+  private static final String DEFAULT_INSTALL_DIR = TMP_DIR + "/cargo_containers/";
+  private static final String DEFAULT_MODULE_EXTRACTION_DIR = TMP_DIR + "/cargo_modules/";
+  static final String DEFAULT_MODULE_LOCATION = GEODE_BUILD_HOME + "/tools/Modules/";
 
   protected IntSupplier portSupplier() {
     return portSupplier;
@@ -120,8 +121,6 @@ public abstract class ContainerInstall {
     public boolean isClientServer() {
       return isClientServer;
     }
-
-
   }
 
   public ContainerInstall(String name, String downloadURL, ConnectionType connectionType,
@@ -169,14 +168,14 @@ public abstract class ContainerInstall {
     logger.info("Installed container into " + getHome());
   }
 
-  public ServerContainer generateContainer(String containerDescriptors) throws IOException {
+  ServerContainer generateContainer(String containerDescriptors) throws IOException {
     return generateContainer(null, containerDescriptors);
   }
 
   /**
    * Cleans up the installation by deleting the extracted module and downloaded installation folders
    */
-  public void clearPreviousInstall(String installDir) throws IOException {
+  private void clearPreviousInstall(String installDir) throws IOException {
     File installFolder = new File(installDir);
     // Remove installs from previous runs in the same folder
     if (installFolder.exists()) {
@@ -185,7 +184,7 @@ public abstract class ContainerInstall {
     }
   }
 
-  public void setDefaultLocatorPort(int port) {
+  void setDefaultLocatorPort(int port) {
     defaultLocatorPort = port;
   }
 
@@ -195,7 +194,7 @@ public abstract class ContainerInstall {
    * Since an installation can only be client server or peer to peer there is no need for a function
    * which checks for a peer to peer installation (just check if not client server).
    */
-  public boolean isClientServer() {
+  boolean isClientServer() {
     return connType.isClientServer();
   }
 
@@ -212,14 +211,14 @@ public abstract class ContainerInstall {
    * The module contains jars needed for geode session setup as well as default templates for some
    * needed XML files.
    */
-  public String getModulePath() {
+  String getModulePath() {
     return modulePath;
   }
 
   /**
    * The path to the session testing WAR file
    */
-  public String getWarFilePath() {
+  String getWarFilePath() {
     return warFilePath;
   }
 
@@ -227,7 +226,7 @@ public abstract class ContainerInstall {
    * @return The enum {@link #connType} which represents the type of connection for this
    *         installation
    */
-  public ConnectionType getConnectionType() {
+  ConnectionType getConnectionType() {
     return connType;
   }
 
@@ -237,7 +236,7 @@ public abstract class ContainerInstall {
    * This is the address that a container uses by default. Containers themselves can have their own
    * personal locator address, but will default to this address unless specifically set.
    */
-  public String getDefaultLocatorAddress() {
+  String getDefaultLocatorAddress() {
     return defaultLocatorAddress;
   }
 
@@ -247,14 +246,14 @@ public abstract class ContainerInstall {
    * This is the port that a container uses by default. Containers themselves can have their own
    * personal locator port, but will default to this port unless specifically set.
    */
-  public int getDefaultLocatorPort() {
+  int getDefaultLocatorPort() {
     return defaultLocatorPort;
   }
 
   /**
    * Gets the cache XML file to use by default for this installation
    */
-  public File getCacheXMLFile() {
+  File getCacheXMLFile() {
     return new File(modulePath + "/conf/" + getConnectionType().getCacheXMLFileName());
   }
 
@@ -287,7 +286,7 @@ public abstract class ContainerInstall {
    * NOTE::This walks into the extensions folder and then uses a hardcoded path from there making it
    * very unreliable if things are moved.
    */
-  protected static String findSessionTestingWar() {
+  private static String findSessionTestingWar() {
     // Start out searching directory above current
     String curPath = "../";
 
@@ -322,15 +321,15 @@ public abstract class ContainerInstall {
    *        extract. Used as a search parameter to find the module archive.
    * @return The path to the non-archive (extracted) version of the module files
    */
-  protected static String findAndExtractModule(String geodeModuleLocation, String moduleName)
+  private static String findAndExtractModule(String geodeModuleLocation, String moduleName)
       throws IOException {
-    File modulePath = null;
     File modulesDir = new File(geodeModuleLocation);
 
-    boolean archive = false;
     logger.info("Trying to access build dir " + modulesDir);
 
     // Search directory for tomcat module folder/zip
+    boolean archive = false;
+    File modulePath = null;
     for (File file : modulesDir.listFiles()) {
 
       if (file.getName().toLowerCase().contains(moduleName)) {
@@ -386,7 +385,7 @@ public abstract class ContainerInstall {
    *        property value the current value. If false, replaces the current property value with the
    *        given property value
    */
-  protected static void editPropertyFile(String filePath, String propertyName, String propertyValue,
+  static void editPropertyFile(String filePath, String propertyName, String propertyValue,
       boolean append) throws Exception {
     FileInputStream input = new FileInputStream(filePath);
     Properties properties = new Properties();
@@ -405,29 +404,17 @@ public abstract class ContainerInstall {
     logger.info("Modified container Property file " + filePath);
   }
 
-  protected static void editXMLFile(String XMLPath, String tagId, String tagName,
+  static void editXMLFile(String XMLPath, String tagId, String tagName,
       String parentTagName, HashMap<String, String> attributes) {
     editXMLFile(XMLPath, tagId, tagName, tagName, parentTagName, attributes, false);
   }
 
-  protected static void editXMLFile(String XMLPath, String tagName, String parentTagName,
-      HashMap<String, String> attributes) {
-    editXMLFile(XMLPath, tagName, parentTagName, attributes, false);
-  }
-
-  protected static void editXMLFile(String XMLPath, String tagName, String parentTagName,
+  static void editXMLFile(String XMLPath, String tagName, String parentTagName,
       HashMap<String, String> attributes, boolean writeOnSimilarAttributeNames) {
     editXMLFile(XMLPath, null, tagName, tagName, parentTagName, attributes,
         writeOnSimilarAttributeNames);
   }
 
-  protected static void editXMLFile(String XMLPath, String tagName, String replacementTagName,
-      String parentTagName, HashMap<String, String> attributes,
-      boolean writeOnSimilarAttributeNames) {
-    editXMLFile(XMLPath, null, tagName, replacementTagName, parentTagName, attributes,
-        writeOnSimilarAttributeNames);
-  }
-
   /**
    * Edit the given xml file
    *
@@ -448,7 +435,7 @@ public abstract class ContainerInstall {
    *        rather than adding a new element. If false, create a new XML element (unless tagId is
    *        not null).
    */
-  protected static void editXMLFile(String XMLPath, String tagId, String tagName,
+  private static void editXMLFile(String XMLPath, String tagId, String tagName,
       String replacementTagName, String parentTagName, HashMap<String, String> attributes,
       boolean writeOnSimilarAttributeNames) {
 
@@ -586,7 +573,7 @@ public abstract class ContainerInstall {
     for (String key : attributes.keySet()) {
       Node attr = nodeAttrs.getNamedItem(key);
       if (attr == null
-          || (checkSimilarValues && !attr.getTextContent().equals(attributes.get(key)))) {
+          || checkSimilarValues && !attr.getTextContent().equals(attributes.get(key))) {
         return false;
       }
     }
@@ -594,8 +581,8 @@ public abstract class ContainerInstall {
     // Check to make sure the node does not have more than the attribute fields
     for (int i = 0; i < nodeAttrs.getLength(); i++) {
       String attr = nodeAttrs.item(i).getNodeName();
-      if (attributes.get(attr) == null || (checkSimilarValues
-          && !attributes.get(attr).equals(nodeAttrs.item(i).getTextContent()))) {
+      if (attributes.get(attr) == null || checkSimilarValues
+          && !attributes.get(attr).equals(nodeAttrs.item(i).getTextContent())) {
         return false;
       }
     }
diff --git a/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/GradleBuildWithGeodeCoreAcceptanceTest.java b/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/GradleBuildWithGeodeCoreAcceptanceTest.java
index 014ac84..229e054 100644
--- a/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/GradleBuildWithGeodeCoreAcceptanceTest.java
+++ b/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/GradleBuildWithGeodeCoreAcceptanceTest.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.management.internal.rest;
 
 import static org.apache.geode.test.util.ResourceUtils.copyDirectoryResource;
@@ -41,7 +40,7 @@ public class GradleBuildWithGeodeCoreAcceptanceTest {
   public TemporaryFolder temp = new TemporaryFolder();
 
   @Test
-  public void testBasicGradleBuild() throws Exception {
+  public void testBasicGradleBuild() {
     URL projectDir = getResource("/gradle-test-projects/management");
     assertThat(projectDir).isNotNull();
 
@@ -65,12 +64,11 @@ public class GradleBuildWithGeodeCoreAcceptanceTest {
     build.setStandardOutput(System.out);
     build.withArguments("-Pversion=" + geodeVersion,
         "-Pgroup=" + projectGroup,
-        "-PgeodeHome=" + geodeHome.toString());
+        "-PgeodeHome=" + geodeHome);
 
     build.forTasks("installDist", "run");
     build.run();
 
     connection.close();
   }
-
 }
diff --git a/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/StandaloneClientManagementAPIAcceptanceTest.java b/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/StandaloneClientManagementAPIAcceptanceTest.java
index 9832c58..20ddc37 100644
--- a/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/StandaloneClientManagementAPIAcceptanceTest.java
+++ b/geode-assembly/src/acceptanceTest/java/org/apache/geode/management/internal/rest/StandaloneClientManagementAPIAcceptanceTest.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.management.internal.rest;
 
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
@@ -69,7 +68,7 @@ public class StandaloneClientManagementAPIAcceptanceTest {
 
   @BeforeClass
   public static void beforeClass() {
-    /**
+    /*
      * This file was generated with:
      * keytool -genkey -dname "CN=localhost" -alias self -validity 3650 -keyalg EC \
      * -keystore trusted.keystore -keypass password -storepass password \
@@ -82,7 +81,6 @@ public class StandaloneClientManagementAPIAcceptanceTest {
   }
 
   @Test
-  @Parameterized.Parameters
   public void clientCreatesRegionUsingClusterManagementService() throws Exception {
     JarBuilder jarBuilder = new JarBuilder();
     String filePath =
diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/RestFunctionExecuteDUnitTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/RestFunctionExecuteDUnitTest.java
index 1197785..dd63738 100644
--- a/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/RestFunctionExecuteDUnitTest.java
+++ b/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/RestFunctionExecuteDUnitTest.java
@@ -12,10 +12,8 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.rest.internal.web;
 
-
 import static org.apache.geode.test.junit.rules.HttpResponseAssert.assertResponse;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -38,7 +36,7 @@ import org.apache.geode.test.junit.categories.RestAPITest;
 import org.apache.geode.test.junit.rules.GeodeDevRestClient;
 import org.apache.geode.test.junit.rules.GfshCommandRule;
 
-@Category({RestAPITest.class})
+@Category(RestAPITest.class)
 public class RestFunctionExecuteDUnitTest {
 
   @ClassRule
@@ -47,8 +45,11 @@ public class RestFunctionExecuteDUnitTest {
   @ClassRule
   public static GfshCommandRule gfsh = new GfshCommandRule();
 
-  private static JarBuilder jarBuilder = new JarBuilder();
-  private static MemberVM locator, server1, server2;
+  private static final JarBuilder jarBuilder = new JarBuilder();
+
+  private static MemberVM locator;
+  private static MemberVM server1;
+  private static MemberVM server2;
 
   private GeodeDevRestClient client;
 
@@ -58,20 +59,19 @@ public class RestFunctionExecuteDUnitTest {
     File jarsToDeploy = new File(gfsh.getWorkingDir(), "function.jar");
     jarBuilder.buildJar(jarsToDeploy, loadClassToFile());
 
-
     Properties locatorProps = new Properties();
-    locatorProps.put(ConfigurationProperties.SECURITY_MANAGER,
+    locatorProps.setProperty(ConfigurationProperties.SECURITY_MANAGER,
         SimpleSecurityManager.class.getName());
     locator = cluster.startLocatorVM(0, locatorProps);
 
     Properties props = new Properties();
-    props.put(ConfigurationProperties.START_DEV_REST_API, "true");
-    props.put("security-username", "cluster");
-    props.put("security-password", "cluster");
-    props.put(ConfigurationProperties.GROUPS, "group1");
+    props.setProperty(ConfigurationProperties.START_DEV_REST_API, "true");
+    props.setProperty("security-username", "cluster");
+    props.setProperty("security-password", "cluster");
+    props.setProperty(ConfigurationProperties.GROUPS, "group1");
     server1 = cluster.startServerVM(1, props, locator.getPort());
 
-    props.put(ConfigurationProperties.GROUPS, "group2");
+    props.setProperty(ConfigurationProperties.GROUPS, "group2");
     server2 = cluster.startServerVM(2, props, locator.getPort());
 
     gfsh.connectAndVerify(locator);
@@ -82,7 +82,7 @@ public class RestFunctionExecuteDUnitTest {
   }
 
   @Test
-  public void connectToServer1() throws Exception {
+  public void connectToServer1() {
     client = new GeodeDevRestClient("localhost", server1.getHttpPort());
     assertResponse(client.doPost("/functions/myTestFunction", "dataRead", "dataRead", ""))
         .hasStatusCode(403);
@@ -101,7 +101,7 @@ public class RestFunctionExecuteDUnitTest {
   }
 
   @Test
-  public void connectToServer2() throws Exception {
+  public void connectToServer2() {
     // function is deployed on server1
     client = new GeodeDevRestClient("localhost", server2.getHttpPort());
     assertResponse(client.doPost("/functions/myTestFunction", "dataRead", "dataRead", ""))
@@ -118,6 +118,4 @@ public class RestFunctionExecuteDUnitTest {
 
     return new File(resourcePath);
   }
-
-
 }
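
A recurring change in this file (and in ClusterCommunicationsDUnitTest further down) is swapping the raw Properties.put for Properties.setProperty. setProperty is the String-only API, so nothing but strings can land in the table, whereas put is the untyped method inherited from Hashtable. A short sketch using the same keys as the diff (the class name is made up):

    import java.util.Properties;

    import org.apache.geode.distributed.ConfigurationProperties;

    public class PropertiesSketch {
      public static void main(String[] args) {
        Properties props = new Properties();
        // setProperty(String, String) keeps the table string-typed;
        // put(Object, Object) would accept any value.
        props.setProperty(ConfigurationProperties.START_DEV_REST_API, "true");
        props.setProperty(ConfigurationProperties.GROUPS, "group1");
        System.out.println(props);
      }
    }
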
diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/controllers/RestAPIsWithSSLDUnitTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/controllers/RestAPIsWithSSLDUnitTest.java
index 2715ce0..68d94c1 100644
--- a/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/controllers/RestAPIsWithSSLDUnitTest.java
+++ b/geode-assembly/src/distributedTest/java/org/apache/geode/rest/internal/web/controllers/RestAPIsWithSSLDUnitTest.java
@@ -76,22 +76,27 @@ import org.apache.geode.cache.client.ClientRegionShortcut;
 import org.apache.geode.internal.security.SecurableCommunicationChannel;
 import org.apache.geode.test.dunit.rules.ClientVM;
 import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 import org.apache.geode.test.dunit.rules.MemberVM;
 import org.apache.geode.test.junit.categories.RestAPITest;
 
 /**
  * @since GemFire 8.0
  */
-@Category({RestAPITest.class})
+@Category(RestAPITest.class)
 public class RestAPIsWithSSLDUnitTest {
+
   private static final String PEOPLE_REGION_NAME = "People";
   private static final String INVALID_CLIENT_ALIAS = "INVALID_CLIENT_ALIAS";
 
-  public String urlContext = "/geode";
+  private String urlContext = "/geode";
 
   @Rule
   public ClusterStartupRule cluster = new ClusterStartupRule();
 
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
 
   private MemberVM server;
   private ClientVM client;
@@ -168,7 +173,6 @@ public class RestAPIsWithSSLDUnitTest {
   }
 
   private static CloseableHttpClient getSSLBasedHTTPClient(Properties properties) throws Exception {
-
     KeyStore clientKeys = KeyStore.getInstance("JKS");
     File keystoreJKSForPath = findKeyStoreJKS(properties);
     clientKeys.load(new FileInputStream(keystoreJKSForPath), "password".toCharArray());
@@ -193,7 +197,7 @@ public class RestAPIsWithSSLDUnitTest {
           }
         }).build();
 
-    // Host checking is disabled here , as tests might run on multiple hosts and
+    // Host checking is disabled here, as tests might run on multiple hosts and
     // host entries can not be assumed
     SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(
         sslcontext, SSLConnectionSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
@@ -229,11 +233,8 @@ public class RestAPIsWithSSLDUnitTest {
     assertEquals(json.get("gender").asText(), Gender.FEMALE.name());
   }
 
-  // Actual Tests starts here.
-
   @Test
   public void testSimpleSSL() throws Exception {
-
     Properties props = new Properties();
     props.setProperty(SSL_KEYSTORE, findTrustedJKSWithSingleEntry().getCanonicalPath());
     props.setProperty(SSL_TRUSTSTORE, findTrustedJKSWithSingleEntry().getCanonicalPath());
@@ -247,7 +248,6 @@ public class RestAPIsWithSSLDUnitTest {
 
   @Test
   public void testSimpleSSLWithMultiKey_KeyStore() throws Exception {
-
     Properties props = new Properties();
     props.setProperty(SSL_KEYSTORE,
         createTempFileFromResource(getClass(), "/org/apache/geode/internal/net/multiKey.jks")
@@ -267,7 +267,6 @@ public class RestAPIsWithSSLDUnitTest {
 
   @Test(expected = RuntimeException.class)
   public void testSimpleSSLWithMultiKey_KeyStore_WithInvalidClientKey() throws Exception {
-
     Properties props = new Properties();
     props.setProperty(SSL_KEYSTORE,
         createTempFileFromResource(getClass(), "/org/apache/geode/internal/net/multiKey.jks")
@@ -461,7 +460,6 @@ public class RestAPIsWithSSLDUnitTest {
 
   @Test
   public void testSimpleSSLLegacy() throws Exception {
-
     Properties props = new Properties();
     props.setProperty(HTTP_SERVICE_SSL_ENABLED, "true");
     props.setProperty(HTTP_SERVICE_SSL_KEYSTORE,
@@ -590,5 +588,4 @@ public class RestAPIsWithSSLDUnitTest {
     startClusterWithSSL(props);
     validateConnection(props);
   }
-
 }
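
This test also gains a DistributedRestoreSystemProperties rule (as does ClusterCommunicationsDUnitTest later in the patch). By analogy with JUnit's RestoreSystemProperties, the rule presumably puts system properties back after each test in every DUnit VM, so property changes cannot leak between tests. A sketch of the declaration as it appears in the diff (the enclosing class name is made up):

    import org.junit.Rule;

    import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;

    public class SomeDUnitTest {
      // Restores any system properties the test mutates once it finishes.
      @Rule
      public DistributedRestoreSystemProperties restoreSystemProperties =
          new DistributedRestoreSystemProperties();
    }
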
diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/AssemblyContentsIntegrationTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/AssemblyContentsIntegrationTest.java
index 5ae9486..1e8632e 100644
--- a/geode-assembly/src/integrationTest/java/org/apache/geode/AssemblyContentsIntegrationTest.java
+++ b/geode-assembly/src/integrationTest/java/org/apache/geode/AssemblyContentsIntegrationTest.java
@@ -28,17 +28,23 @@ import java.util.stream.Collectors;
 
 import org.apache.commons.io.FileUtils;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.test.junit.categories.RestAPITest;
+import org.apache.geode.test.junit.rules.RequiresGeodeHome;
 
-@Category({RestAPITest.class})
+@Category(RestAPITest.class)
 public class AssemblyContentsIntegrationTest {
 
   private static final String GEODE_HOME = System.getenv("GEODE_HOME");
+
   private Collection<String> expectedAssemblyContent;
 
+  @Rule
+  public RequiresGeodeHome requiresGeodeHome = new RequiresGeodeHome();
+
   @Before
   public void loadExpectedAssemblyContent() throws IOException {
     String assemblyContent =
@@ -55,7 +61,7 @@ public class AssemblyContentsIntegrationTest {
     Files.write(Paths.get("assembly_content.txt"), currentAssemblyContent);
 
     assertThat(currentAssemblyContent)
-        .describedAs("The assembly contents have changed. Verify dependencies and "
+        .as("The assembly contents have changed. Verify dependencies and "
             + "copy geode-assembly/build/integrationTest/assembly_content.txt to "
             + "geode-assembly/src/integrationTest/resources/assembly_content.txt")
         .containsExactlyElementsOf(expectedAssemblyContent);
@@ -69,7 +75,7 @@ public class AssemblyContentsIntegrationTest {
     Path geodeHomePath = Paths.get(GEODE_HOME);
 
     assertThat(geodeHomeDirectory)
-        .describedAs(
+        .as(
             "Please set the GEODE_HOME environment variable to the product installation directory.")
         .isDirectory();
 
diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/BundledJarsJUnitTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/BundledJarsJUnitTest.java
index bdc8ba4..d0c4818 100644
--- a/geode-assembly/src/integrationTest/java/org/apache/geode/BundledJarsJUnitTest.java
+++ b/geode-assembly/src/integrationTest/java/org/apache/geode/BundledJarsJUnitTest.java
@@ -19,9 +19,11 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.Collection;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
@@ -31,18 +33,24 @@ import java.util.stream.Stream;
 
 import org.apache.commons.io.FileUtils;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.test.junit.categories.RestAPITest;
+import org.apache.geode.test.junit.rules.RequiresGeodeHome;
 
-@Category({RestAPITest.class})
+@Category(RestAPITest.class)
 public class BundledJarsJUnitTest {
 
   private static final String VERSION_PATTERN = "[0-9-_.v]{3,}.*\\.jar$";
   private static final String GEODE_HOME = System.getenv("GEODE_HOME");
+
   private Set<String> expectedJars;
 
+  @Rule
+  public RequiresGeodeHome requiresGeodeHome = new RequiresGeodeHome();
+
   @Before
   public void loadExpectedJars() throws IOException {
     String expectedJarFile =
@@ -54,34 +62,39 @@ public class BundledJarsJUnitTest {
 
   @Test
   public void verifyBundledJarsHaveNotChanged() throws IOException {
-    TreeMap<String, String> sortedJars = getBundledJars();
+    Map<String, String> sortedJars = getBundledJars();
     Stream<String> lines =
         sortedJars.entrySet().stream().map(entry -> removeVersion(entry.getKey()));
     Set<String> bundledJarNames = new TreeSet<>(lines.collect(Collectors.toSet()));
 
     Files.write(Paths.get("bundled_jars.txt"), bundledJarNames);
 
-    TreeSet<String> newJars = new TreeSet<>(bundledJarNames);
+    Set<String> newJars = new TreeSet<>(bundledJarNames);
     newJars.removeAll(expectedJars);
-    TreeSet<String> missingJars = new TreeSet<>(expectedJars);
+
+    Set<String> missingJars = new TreeSet<>(expectedJars);
     missingJars.removeAll(bundledJarNames);
 
     String message =
         "The bundled jars have changed. Please make sure you update the licence and notice"
-            + "\nas described in https://cwiki.apache.org/confluence/display/GEODE/License+Guide+for+Contributors"
-            + "\nWhen fixed, copy geode-assembly/build/test/bundled_jars.txt"
-            + "\nto src/test/resources/expected_jars.txt" + "\nRemoved Jars\n--------------\n"
-            + String.join("\n", missingJars) + "\n\nAdded Jars\n--------------\n"
-            + String.join("\n", newJars) + "\n\n";
+            + System.lineSeparator()
+            + "as described in https://cwiki.apache.org/confluence/display/GEODE/License+Guide+for+Contributors"
+            + System.lineSeparator() + "When fixed, copy geode-assembly/build/test/bundled_jars.txt"
+            + System.lineSeparator() + "to src/test/resources/expected_jars.txt"
+            + System.lineSeparator() + "Removed Jars" + System.lineSeparator() + "--------------"
+            + System.lineSeparator() + String.join(System.lineSeparator(), missingJars)
+            + System.lineSeparator() + System.lineSeparator() + "Added Jars"
+            + System.lineSeparator() + "--------------" + System.lineSeparator()
+            + String.join(System.lineSeparator(), newJars) + System.lineSeparator()
+            + System.lineSeparator();
 
     assertTrue(message, expectedJars.equals(bundledJarNames));
-
   }
 
   /**
    * Find all of the jars bundled with the project. Key is the name of the jar, value is the path.
    */
-  private TreeMap<String, String> getBundledJars() {
+  private Map<String, String> getBundledJars() {
     File geodeHomeDirectory = new File(GEODE_HOME);
 
     assertTrue(
@@ -89,11 +102,11 @@ public class BundledJarsJUnitTest {
         geodeHomeDirectory.isDirectory());
 
     Collection<File> jars = FileUtils.listFiles(geodeHomeDirectory, new String[] {"jar"}, true);
-    TreeMap<String, String> sortedJars = new TreeMap<>();
+    Map<String, String> sortedJars = new TreeMap<>();
     jars.forEach(jar -> sortedJars.put(jar.getName(), jar.getPath()));
 
     Collection<File> wars = FileUtils.listFiles(geodeHomeDirectory, new String[] {"war"}, true);
-    TreeSet<File> sortedWars = new TreeSet<>(wars);
+    Set<File> sortedWars = new TreeSet<>(wars);
     sortedWars.stream().flatMap(BundledJarsJUnitTest::extractJarNames)
         .forEach(jar -> sortedJars.put(jar.getName(), jar.getPath()));
 
@@ -118,8 +131,7 @@ public class BundledJarsJUnitTest {
           // Materialize the list of files while the war is still open
           .collect(Collectors.toList()).stream();
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new UncheckedIOException(e);
     }
   }
-
 }
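
The assembly integration tests (AssemblyContentsIntegrationTest, BundledJarsJUnitTest, GeodeDependencyJarIntegrationTest) now also declare a RequiresGeodeHome rule, which, as the name suggests, guards the test on a usable GEODE_HOME rather than letting it fail later with a confusing error. The declaration added by the commit, shown as a stand-alone sketch (the class name is illustrative):

    import org.junit.Rule;

    import org.apache.geode.test.junit.rules.RequiresGeodeHome;

    public class SomeAssemblyIntegrationTest {
      // Skips or fails fast when the GEODE_HOME environment variable
      // does not point at a product install.
      @Rule
      public RequiresGeodeHome requiresGeodeHome = new RequiresGeodeHome();
    }
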
diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/GeodeDependencyJarIntegrationTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/GeodeDependencyJarIntegrationTest.java
index c5998d1..7b8a6ea 100644
--- a/geode-assembly/src/integrationTest/java/org/apache/geode/GeodeDependencyJarIntegrationTest.java
+++ b/geode-assembly/src/integrationTest/java/org/apache/geode/GeodeDependencyJarIntegrationTest.java
@@ -28,17 +28,23 @@ import java.util.jar.Manifest;
 import java.util.stream.Collectors;
 
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.test.junit.categories.RestAPITest;
+import org.apache.geode.test.junit.rules.RequiresGeodeHome;
 
-@Category({RestAPITest.class})
+@Category(RestAPITest.class)
 public class GeodeDependencyJarIntegrationTest {
 
   private static final String GEODE_HOME = System.getenv("GEODE_HOME");
+
   private List<String> expectedClasspathElements;
 
+  @Rule
+  public RequiresGeodeHome requiresGeodeHome = new RequiresGeodeHome();
+
   @Before
   public void loadExpectedClassPath() throws IOException {
     String dependencyClasspath =
diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/RestSecurityWithSSLTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/RestSecurityWithSSLTest.java
index 0bc239b..32f83f5 100644
--- a/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/RestSecurityWithSSLTest.java
+++ b/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/RestSecurityWithSSLTest.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.rest.internal.web;
 
 import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_MANAGER;
@@ -43,7 +42,7 @@ import org.apache.geode.test.junit.rules.ServerStarterRule;
 @Category({SecurityTest.class, RestAPITest.class})
 public class RestSecurityWithSSLTest {
 
-  private static File KEYSTORE_FILE =
+  private static final File KEYSTORE_FILE =
       new File(createTempFileFromResource(RestSecurityWithSSLTest.class, "/ssl/trusted.keystore")
           .getAbsolutePath());
 
diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityWithSSLTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityWithSSLTest.java
index b5caa1f..3b899a2 100644
--- a/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityWithSSLTest.java
+++ b/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityWithSSLTest.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.tools.pulse;
 
 import static org.apache.geode.distributed.ConfigurationProperties.CLUSTER_SSL_ENABLED;
@@ -60,7 +59,7 @@ import org.apache.geode.test.junit.rules.LocatorStarterRule;
 @Category({SecurityTest.class, PulseTest.class})
 public class PulseSecurityWithSSLTest {
 
-  private static File jks =
+  private static final File jks =
       new File(createTempFileFromResource(PulseSecurityWithSSLTest.class, "/ssl/trusted.keystore")
           .getAbsolutePath());
 
@@ -132,5 +131,4 @@ public class PulseSecurityWithSSLTest {
 
     assertThat(JsonPath.parse(body).read("$.SystemAlerts.connectedFlag", Boolean.class)).isTrue();
   }
-
 }
diff --git a/geode-connectors/src/distributedTest/java/org/apache/geode/connectors/jdbc/internal/cli/CreateMappingCommandDUnitTest.java b/geode-connectors/src/distributedTest/java/org/apache/geode/connectors/jdbc/internal/cli/CreateMappingCommandDUnitTest.java
index e04519f..94d6a13 100644
--- a/geode-connectors/src/distributedTest/java/org/apache/geode/connectors/jdbc/internal/cli/CreateMappingCommandDUnitTest.java
+++ b/geode-connectors/src/distributedTest/java/org/apache/geode/connectors/jdbc/internal/cli/CreateMappingCommandDUnitTest.java
@@ -31,7 +31,6 @@ import static org.assertj.core.api.Assertions.assertThat;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.net.URISyntaxException;
 import java.sql.Connection;
 import java.sql.JDBCType;
 import java.sql.SQLException;
@@ -81,7 +80,7 @@ import org.apache.geode.test.junit.rules.GfshCommandRule;
 import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 
-@Category({JDBCConnectorTest.class})
+@Category(JDBCConnectorTest.class)
 public class CreateMappingCommandDUnitTest {
 
   private static final String TEST_REGION = "testRegion";
@@ -126,7 +125,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   @After
-  public void after() throws Exception {
+  public void after() {
     teardownDatabase();
   }
 
@@ -210,17 +209,14 @@ public class CreateMappingCommandDUnitTest {
         .statusIsSuccess();
   }
 
-  private static RegionMapping getRegionMappingFromClusterConfig(String regionName,
-      String groups) {
+  private static RegionMapping getRegionMappingFromClusterConfig(String regionName, String groups) {
     CacheConfig cacheConfig =
         InternalLocator.getLocator().getConfigurationPersistenceService().getCacheConfig(groups);
     RegionConfig regionConfig = cacheConfig.getRegions().stream()
         .filter(region -> region.getName().equals(convertRegionPathToName(regionName))).findFirst()
         .orElse(null);
-    RegionMapping regionMapping =
-        (RegionMapping) regionConfig.getCustomRegionElements().stream()
-            .filter(element -> element instanceof RegionMapping).findFirst().orElse(null);
-    return regionMapping;
+    return (RegionMapping) regionConfig.getCustomRegionElements().stream()
+        .filter(element -> element instanceof RegionMapping).findFirst().orElse(null);
   }
 
   private static RegionMapping getRegionMappingFromService(String regionName) {
@@ -229,8 +225,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private static void validateAsyncEventQueueCreatedInClusterConfig(String regionName,
-      String groups,
-      boolean isParallel) {
+      String groups, boolean isParallel) {
     CacheConfig cacheConfig =
         InternalLocator.getLocator().getConfigurationPersistenceService().getCacheConfig(groups);
     List<CacheConfig.AsyncEventQueue> queueList = cacheConfig.getAsyncEventQueues();
@@ -243,8 +238,7 @@ public class CreateMappingCommandDUnitTest {
     assertThat(queue.isParallel()).isEqualTo(isParallel);
   }
 
-  private static CacheConfig.AsyncEventQueue findQueue(
-      List<CacheConfig.AsyncEventQueue> queueList,
+  private static CacheConfig.AsyncEventQueue findQueue(List<CacheConfig.AsyncEventQueue> queueList,
       String queueName) {
     for (CacheConfig.AsyncEventQueue queue : queueList) {
       if (queue.getId().equals(queueName)) {
@@ -261,8 +255,7 @@ public class CreateMappingCommandDUnitTest {
     return regionPath;
   }
 
-  private static void validateRegionAlteredInClusterConfig(String regionName,
-      String groups,
+  private static void validateRegionAlteredInClusterConfig(String regionName, String groups,
       boolean synchronous) {
     CacheConfig cacheConfig =
         InternalLocator.getLocator().getConfigurationPersistenceService().getCacheConfig(groups);
@@ -489,8 +482,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private static void assertValidMappingOnLocator(RegionMapping mapping, String regionName,
-      String groups,
-      boolean synchronous, boolean isParallel) {
+      String groups, boolean synchronous, boolean isParallel) {
     assertValidMapping(mapping);
     validateRegionAlteredInClusterConfig(regionName, groups, synchronous);
     if (!synchronous) {
@@ -523,8 +515,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private static void assertValidEmployeeMappingOnLocator(RegionMapping mapping, String regionName,
-      String groups,
-      boolean synchronous, boolean isParallel, String tableName) {
+      String groups, boolean synchronous, boolean isParallel, String tableName) {
     assertValidEmployeeMapping(mapping, tableName);
     validateRegionAlteredInClusterConfig(regionName, groups, synchronous);
     if (!synchronous) {
@@ -550,8 +541,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private static void assertValidResourcePDXMappingOnServer(RegionMapping mapping,
-      String regionName,
-      boolean synchronous, boolean isParallel, String tableName) {
+      String regionName, boolean synchronous, boolean isParallel, String tableName) {
     assertValidResourcePDXMapping(mapping, tableName);
     validateRegionAlteredOnServer(regionName, synchronous);
     if (!synchronous) {
@@ -560,9 +550,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private static void assertValidResourcePDXMappingOnLocator(RegionMapping mapping,
-      String regionName,
-      String groups,
-      boolean synchronous, boolean isParallel, String tableName) {
+      String regionName, String groups, boolean synchronous, boolean isParallel, String tableName) {
     assertValidResourcePDXMapping(mapping, tableName);
     validateRegionAlteredInClusterConfig(regionName, groups, synchronous);
     if (!synchronous) {
@@ -571,8 +559,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   private File loadTestResource(String fileName) {
-    String filePath =
-        createTempFileFromResource(this.getClass(), fileName).getAbsolutePath();
+    String filePath = createTempFileFromResource(getClass(), fileName).getAbsolutePath();
     assertThat(filePath).isNotNull();
 
     return new File(filePath);
@@ -588,8 +575,7 @@ public class CreateMappingCommandDUnitTest {
 
   private File createJar() throws IOException {
     JarBuilder jarBuilder = new JarBuilder();
-    File source = loadTestResource(
-        "/org/apache/geode/internal/ResourcePDX.java");
+    File source = loadTestResource("/org/apache/geode/internal/ResourcePDX.java");
 
     File outputJar = new File(temporaryFolder.getRoot(), "output.jar");
     jarBuilder.buildJar(outputJar, source);
@@ -598,8 +584,7 @@ public class CreateMappingCommandDUnitTest {
 
   private File createClassFile() throws IOException {
     final JavaCompiler javaCompiler = new JavaCompiler();
-    File source = loadTestResource(
-        "/org/apache/geode/internal/ResourcePDX.java");
+    File source = loadTestResource("/org/apache/geode/internal/ResourcePDX.java");
     List<CompiledSourceCode> compiledSourceCodes = javaCompiler.compile(source);
     String className = compiledSourceCodes.get(0).className;
     String fileName = className.substring(className.lastIndexOf(".") + 1) + ".class";
@@ -629,7 +614,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   @Test
-  public void createMappingWithDeployedPdxClassSucceeds() throws IOException, URISyntaxException {
+  public void createMappingWithDeployedPdxClassSucceeds() throws IOException {
     String region1Name = "region1";
     setupReplicate(region1Name);
 
@@ -657,7 +642,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   @Test
-  public void createMappingWithPdxClassFileSetToAJarFile() throws IOException, URISyntaxException {
+  public void createMappingWithPdxClassFileSetToAJarFile() throws IOException {
     String region1Name = "region1";
     setupReplicate(region1Name);
     File jarFile = createJar();
@@ -686,7 +671,7 @@ public class CreateMappingCommandDUnitTest {
   }
 
   @Test
-  public void createMappingWithNonExistingPdxClassFileFails() throws IOException {
+  public void createMappingWithNonExistingPdxClassFileFails() {
     String region1Name = "region1";
     setupReplicate(region1Name);
 
@@ -704,11 +689,10 @@ public class CreateMappingCommandDUnitTest {
   }
 
   @Test
-  public void createMappingWithInvalidJarPdxClassFileFails() throws IOException {
+  public void createMappingWithInvalidJarPdxClassFileFails() {
     String region1Name = "region1";
     setupReplicate(region1Name);
-    File invalidFile = loadTestResource(
-        "/org/apache/geode/internal/ResourcePDX.java");
+    File invalidFile = loadTestResource("/org/apache/geode/internal/ResourcePDX.java");
 
     CommandStringBuilder csb = new CommandStringBuilder(CREATE_MAPPING);
     csb.addOption(REGION_NAME, region1Name);
@@ -725,7 +709,7 @@ public class CreateMappingCommandDUnitTest {
 
   @Test
   public void createMappingWithPdxClassFileSetToAClassFile()
-      throws IOException, URISyntaxException {
+      throws IOException {
     String region1Name = "region1";
     setupReplicate(region1Name);
     File classFile = createClassFile();
@@ -882,130 +866,6 @@ public class CreateMappingCommandDUnitTest {
     });
   }
 
-  public static class Employee implements PdxSerializable {
-    private String id;
-    private String name;
-    private int age;
-
-    public Employee() {
-      // nothing
-    }
-
-    Employee(String id, String name, int age) {
-      this.id = id;
-      this.name = name;
-      this.age = age;
-    }
-
-    String getId() {
-      return id;
-    }
-
-    String getName() {
-      return name;
-    }
-
-    int getAge() {
-      return age;
-    }
-
-    @Override
-    public void toData(PdxWriter writer) {
-      writer.writeString("id", this.id);
-      writer.writeString("name", this.name);
-      writer.writeInt("age", this.age);
-    }
-
-    @Override
-    public void fromData(PdxReader reader) {
-      this.id = reader.readString("id");
-      this.name = reader.readString("name");
-      this.age = reader.readInt("age");
-    }
-  }
-
-  public static class EmployeeNumeric implements PdxSerializerObject {
-    private String id;
-    private String name;
-    private int age;
-    private float income;
-    private long refid;
-
-    public EmployeeNumeric() {
-      // nothing
-    }
-
-    EmployeeNumeric(String id, String name, int age, float income, long refid) {
-      this.id = id;
-      this.name = name;
-      this.age = age;
-      this.income = income;
-      this.refid = refid;
-    }
-
-    String getId() {
-      return id;
-    }
-
-    String getName() {
-      return name;
-    }
-
-    int getAge() {
-      return age;
-    }
-
-    float getIncome() {
-      return income;
-    }
-
-    void setIncome(float income) {
-      this.income = income;
-    }
-
-    long getRefid() {
-      return refid;
-    }
-
-    void setRefid(long refid) {
-      this.refid = refid;
-    }
-  }
-
-  public static class IdAndName implements PdxSerializable {
-    private String id;
-    private String name;
-
-    public IdAndName() {
-      // nothing
-    }
-
-    IdAndName(String id, String name) {
-      this.id = id;
-      this.name = name;
-    }
-
-    String getId() {
-      return id;
-    }
-
-    String getName() {
-      return name;
-    }
-
-    @Override
-    public void toData(PdxWriter writer) {
-      writer.writeString("myid", this.id);
-      writer.writeString("name", this.name);
-    }
-
-    @Override
-    public void fromData(PdxReader reader) {
-      this.id = reader.readString("myid");
-      this.name = reader.readString("name");
-    }
-  }
-
   @Test
   public void createMappingsWithExistingPdxName() {
     String region1Name = "region1";
@@ -1278,4 +1138,130 @@ public class CreateMappingCommandDUnitTest {
             + " must not already exist.");
   }
 
+  private static class Employee implements PdxSerializable {
+
+    private String id;
+    private String name;
+    private int age;
+
+    public Employee() {
+      // nothing
+    }
+
+    Employee(String id, String name, int age) {
+      this.id = id;
+      this.name = name;
+      this.age = age;
+    }
+
+    String getId() {
+      return id;
+    }
+
+    String getName() {
+      return name;
+    }
+
+    int getAge() {
+      return age;
+    }
+
+    @Override
+    public void toData(PdxWriter writer) {
+      writer.writeString("id", id);
+      writer.writeString("name", name);
+      writer.writeInt("age", age);
+    }
+
+    @Override
+    public void fromData(PdxReader reader) {
+      id = reader.readString("id");
+      name = reader.readString("name");
+      age = reader.readInt("age");
+    }
+  }
+
+  private static class EmployeeNumeric implements PdxSerializerObject {
+
+    private String id;
+    private String name;
+    private int age;
+    private float income;
+    private long refid;
+
+    public EmployeeNumeric() {
+      // nothing
+    }
+
+    EmployeeNumeric(String id, String name, int age, float income, long refid) {
+      this.id = id;
+      this.name = name;
+      this.age = age;
+      this.income = income;
+      this.refid = refid;
+    }
+
+    String getId() {
+      return id;
+    }
+
+    String getName() {
+      return name;
+    }
+
+    int getAge() {
+      return age;
+    }
+
+    float getIncome() {
+      return income;
+    }
+
+    void setIncome(float income) {
+      this.income = income;
+    }
+
+    long getRefid() {
+      return refid;
+    }
+
+    void setRefid(long refid) {
+      this.refid = refid;
+    }
+  }
+
+  private static class IdAndName implements PdxSerializable {
+
+    private String id;
+    private String name;
+
+    public IdAndName() {
+      // nothing
+    }
+
+    IdAndName(String id, String name) {
+      this.id = id;
+      this.name = name;
+    }
+
+    String getId() {
+      return id;
+    }
+
+    String getName() {
+      return name;
+    }
+
+    @Override
+    public void toData(PdxWriter writer) {
+      writer.writeString("myid", id);
+      writer.writeString("name", name);
+    }
+
+    @Override
+    public void fromData(PdxReader reader) {
+      id = reader.readString("myid");
+      name = reader.readString("name");
+    }
+  }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java
index 3c9d803..4d7bb23 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java
@@ -31,6 +31,9 @@ import static org.apache.geode.distributed.ConfigurationProperties.USE_CLUSTER_C
 import static org.apache.geode.internal.DataSerializableFixedID.SERIAL_ACKED_MESSAGE;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.getTimeout;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.apache.geode.test.dunit.VM.getVM;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.assertj.core.api.Assertions.assertThat;
 
@@ -38,6 +41,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.File;
 import java.io.IOException;
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -53,6 +57,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -71,9 +77,8 @@ import org.apache.geode.distributed.internal.membership.gms.membership.GMSJoinLe
 import org.apache.geode.internal.DSFIDFactory;
 import org.apache.geode.internal.cache.DirectReplyMessage;
 import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 import org.apache.geode.test.dunit.rules.DistributedRule;
 import org.apache.geode.test.junit.categories.BackwardCompatibilityTest;
 import org.apache.geode.test.junit.categories.MembershipTest;
@@ -86,106 +91,89 @@ import org.apache.geode.test.version.VersionManager;
  */
 @Category({MembershipTest.class, BackwardCompatibilityTest.class})
 @RunWith(Parameterized.class)
-@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
-public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
+@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+public class ClusterCommunicationsDUnitTest implements Serializable {
 
-  private boolean conserveSockets;
-  private boolean useSSL;
+  private static final int NUM_SERVERS = 2;
+  private static final int SMALL_BUFFER_SIZE = 8000;
 
-  enum RunConfiguration {
-    SHARED_CONNECTIONS(true, false),
-    SHARED_CONNECTIONS_WITH_SSL(true, true),
-    UNSHARED_CONNECTIONS(false, false),
-    UNSHARED_CONNECTIONS_WITH_SSL(false, true);
+  private static final long serialVersionUID = -3438183140385150550L;
 
-    boolean useSSL;
-    boolean conserveSockets;
+  private static Cache cache;
 
-    RunConfiguration(boolean conserveSockets, boolean useSSL) {
-      this.useSSL = useSSL;
-      this.conserveSockets = conserveSockets;
-    }
-  }
+  private final String regionName = "clusterTestRegion";
 
-  @Parameterized.Parameters(name = "{0}")
+  private boolean conserveSockets;
+  private boolean useSSL;
+
+  @Parameters(name = "{0}")
   public static Collection<RunConfiguration> data() {
     return Arrays.asList(RunConfiguration.values());
   }
 
-  private static final int NUM_SERVERS = 2;
-  private static final int SMALL_BUFFER_SIZE = 8000;
-
-  private static final long serialVersionUID = -3438183140385150550L;
-
-  private static Cache cache;
-
   @Rule
   public DistributedRule distributedRule =
       DistributedRule.builder().withVMCount(NUM_SERVERS + 1).build();
 
   @Rule
-  public final SerializableTestName testName = new SerializableTestName();
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
 
-  private final String regionName = "clusterTestRegion";
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
 
   public ClusterCommunicationsDUnitTest(RunConfiguration runConfiguration) {
-    this.useSSL = runConfiguration.useSSL;
-    this.conserveSockets = runConfiguration.conserveSockets;
+    useSSL = runConfiguration.useSSL;
+    conserveSockets = runConfiguration.conserveSockets;
   }
 
   @Before
   public void setUp() throws Exception {
-    final Boolean testWithSSL = useSSL;
-    final Boolean testWithConserveSocketsTrue = conserveSockets;
-    Invoke.invokeInEveryVM(() -> {
-      this.useSSL = testWithSSL;
-      this.conserveSockets = testWithConserveSocketsTrue;
-    });
-    IgnoredException.addIgnoredException("Socket Closed");
-    IgnoredException.addIgnoredException("Remote host closed connection during handshake");
+    addIgnoredException("Socket Closed");
+    addIgnoredException("Remote host closed connection during handshake");
   }
 
   @Test
   public void createEntryAndVerifyUpdate() {
-    int locatorPort = createLocator(VM.getVM(0));
+    int locatorPort = createLocator(getVM(0));
     for (int i = 1; i <= NUM_SERVERS; i++) {
-      createCacheAndRegion(VM.getVM(i), locatorPort);
+      createCacheAndRegion(getVM(i), locatorPort);
     }
-    performCreate(VM.getVM(1));
+    performCreate(getVM(1));
     for (int i = 1; i <= NUM_SERVERS; i++) {
-      verifyCreatedEntry(VM.getVM(i));
+      verifyCreatedEntry(getVM(i));
     }
-    performUpdate(VM.getVM(1));
+    performUpdate(getVM(1));
     for (int i = 1; i <= NUM_SERVERS; i++) {
-      verifyUpdatedEntry(VM.getVM(i));
+      verifyUpdatedEntry(getVM(i));
     }
   }
 
   @Test
   public void createEntryWithBigMessage() {
-    int locatorPort = createLocator(VM.getVM(0));
+    int locatorPort = createLocator(getVM(0));
     for (int i = 1; i <= NUM_SERVERS; i++) {
-      createCacheAndRegion(VM.getVM(i), locatorPort);
+      createCacheAndRegion(getVM(i), locatorPort);
     }
-    performCreateWithLargeValue(VM.getVM(1));
+    performCreateWithLargeValue(getVM(1));
     // fault the value into an empty cache - forces use of message chunking
     for (int i = 1; i <= NUM_SERVERS - 1; i++) {
-      verifyCreatedEntry(VM.getVM(i));
+      verifyCreatedEntry(getVM(i));
     }
   }
 
   @Test
   public void receiveBigResponse() {
-    Invoke.invokeInEveryVM(() -> DSFIDFactory.registerDSFID(SERIAL_ACKED_MESSAGE,
+    invokeInEveryVM(() -> DSFIDFactory.registerDSFID(SERIAL_ACKED_MESSAGE,
         SerialAckedMessageWithBigReply.class));
     try {
-      int locatorPort = createLocator(VM.getVM(0));
+      int locatorPort = createLocator(getVM(0));
       for (int i = 1; i <= NUM_SERVERS; i++) {
-        createCacheAndRegion(VM.getVM(i), locatorPort);
+        createCacheAndRegion(getVM(i), locatorPort);
       }
-      final DistributedMember vm2ID =
-          VM.getVM(2).invoke(() -> cache.getDistributedSystem().getDistributedMember());
-      VM.getVM(1).invoke("receive a large direct-reply message", () -> {
+      DistributedMember vm2ID =
+          getVM(2).invoke(() -> cache.getDistributedSystem().getDistributedMember());
+      getVM(1).invoke("receive a large direct-reply message", () -> {
         SerialAckedMessageWithBigReply messageWithBigReply = new SerialAckedMessageWithBigReply();
         await().until(() -> {
           messageWithBigReply.send(Collections.singleton(vm2ID));
@@ -193,7 +181,7 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
         });
       });
     } finally {
-      Invoke.invokeInEveryVM(
+      invokeInEveryVM(
           () -> DSFIDFactory.registerDSFID(SERIAL_ACKED_MESSAGE, SerialAckedMessage.class));
     }
   }
@@ -209,7 +197,7 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
     VM server1VM = Host.getHost(0).getVM(testVersion, 1);
     int locatorPort = createLocator(locatorVM);
     createCacheAndRegion(server1VM, locatorPort);
-    performCreate(VM.getVM(1));
+    performCreate(getVM(1));
 
     // roll the locator to the current version
     locatorVM.invoke("stop locator", () -> Locator.getLocator().stop());
@@ -234,7 +222,6 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
     server1VM = Host.getHost(0).getVM(VersionManager.CURRENT_VERSION, 1);
     createCacheAndRegion(server1VM, locatorPort);
 
-
     verifyCreatedEntry(server1VM);
     verifyCreatedEntry(server2VM);
   }
@@ -246,7 +233,6 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
     });
   }
 
-
   private void performCreate(VM memberVM) {
     memberVM.invoke("perform create", () -> cache
         .getRegion(regionName).put("testKey", "testValue"));
@@ -293,35 +279,50 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
     // if you need to debug SSL communications use this property:
     // System.setProperty("javax.net.debug", "all");
     Properties properties = getDistributedSystemProperties();
-    properties.put(LOCATORS, "localhost[" + locatorPort + "]");
+    properties.setProperty(LOCATORS, "localhost[" + locatorPort + "]");
     return new CacheFactory(properties).create();
   }
 
   public Properties getDistributedSystemProperties() {
     Properties properties = new Properties();
-    properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
-    properties.put(USE_CLUSTER_CONFIGURATION, "false");
-    properties.put(NAME, "vm" + VM.getCurrentVMNum());
-    properties.put(CONSERVE_SOCKETS, "" + conserveSockets);
-    properties.put(SOCKET_LEASE_TIME, "10000");
-    properties.put(SOCKET_BUFFER_SIZE, "" + SMALL_BUFFER_SIZE);
+    properties.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(USE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(NAME, "vm" + VM.getCurrentVMNum());
+    properties.setProperty(CONSERVE_SOCKETS, "" + conserveSockets);
+    properties.setProperty(SOCKET_LEASE_TIME, "10000");
+    properties.setProperty(SOCKET_BUFFER_SIZE, "" + SMALL_BUFFER_SIZE);
 
     if (useSSL) {
-      properties.put(SSL_ENABLED_COMPONENTS, "cluster,locator");
-      properties.put(SSL_KEYSTORE,
-          createTempFileFromResource(this.getClass(), "server.keystore")
+      properties.setProperty(SSL_ENABLED_COMPONENTS, "cluster,locator");
+      properties
+          .setProperty(SSL_KEYSTORE, createTempFileFromResource(getClass(), "server.keystore")
               .getAbsolutePath());
-      properties.put(SSL_TRUSTSTORE,
-          createTempFileFromResource(this.getClass(), "server.keystore")
+      properties.setProperty(SSL_TRUSTSTORE,
+          createTempFileFromResource(getClass(), "server.keystore")
               .getAbsolutePath());
-      properties.put(SSL_PROTOCOLS, "TLSv1.2");
-      properties.put(SSL_KEYSTORE_PASSWORD, "password");
-      properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-      properties.put(SSL_REQUIRE_AUTHENTICATION, "true");
+      properties.setProperty(SSL_PROTOCOLS, "TLSv1.2");
+      properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+      properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+      properties.setProperty(SSL_REQUIRE_AUTHENTICATION, "true");
     }
     return properties;
   }
 
+  enum RunConfiguration {
+    SHARED_CONNECTIONS(true, false),
+    SHARED_CONNECTIONS_WITH_SSL(true, true),
+    UNSHARED_CONNECTIONS(false, false),
+    UNSHARED_CONNECTIONS_WITH_SSL(false, true);
+
+    boolean useSSL;
+    boolean conserveSockets;
+
+    RunConfiguration(boolean conserveSockets, boolean useSSL) {
+      this.useSSL = useSSL;
+      this.conserveSockets = conserveSockets;
+    }
+  }
+
   /**
    * SerialAckedMessageWithBigReply requires conserve-sockets=false and acts to send
    * a large reply message to the sender. You must have already created a cache in the
@@ -329,21 +330,19 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
    * of SERIAL_ACKED_MESSAGE. Don't forget to reset the registration to
    * SerialAckedMessage at the end of the test.
    */
-  public static class SerialAckedMessageWithBigReply extends DistributionMessage
-      implements MessageWithReply,
-      DirectReplyMessage {
+  private static class SerialAckedMessageWithBigReply extends DistributionMessage
+      implements MessageWithReply, DirectReplyMessage {
     static final int DSFID = SERIAL_ACKED_MESSAGE;
 
     private int processorId;
-    private transient ClusterDistributionManager originDm;
-    private transient DirectReplyProcessor replyProcessor;
+    private ClusterDistributionManager originDm;
+    private DirectReplyProcessor replyProcessor;
 
     public SerialAckedMessageWithBigReply() {
-      super();
       InternalDistributedSystem ds = InternalDistributedSystem.getAnyInstance();
-      if (ds != null) { // this constructor is used in serialization as well as when sending to
-                        // others
-        this.originDm = (ClusterDistributionManager) ds.getDistributionManager();
+      // this constructor is used in serialization as well as when sending to others
+      if (ds != null) {
+        originDm = (ClusterDistributionManager) ds.getDistributionManager();
       }
     }
 
@@ -355,7 +354,7 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
       processorId = replyProcessor.getProcessorId();
       setRecipients(recipients);
       Set failures = originDm.putOutgoing(this);
-      if (failures != null && failures.size() > 0) {
+      if (failures != null && !failures.isEmpty()) {
         for (Object failure : failures) {
           System.err.println("Unable to send serial acked message to " + failure);
         }
@@ -417,9 +416,8 @@ public class ClusterCommunicationsDUnitTest implements java.io.Serializable {
     @Override
     public void registerProcessor() {
       if (replyProcessor != null) {
-        this.processorId = this.replyProcessor.register();
+        processorId = replyProcessor.register();
       }
     }
   }
-
 }
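
(Most of the churn in ClusterCommunicationsDUnitTest is the switch from Properties.put to
Properties.setProperty. A minimal sketch of why setProperty is the safer call; the class name
SetPropertyVsPut is hypothetical, not part of this commit. getProperty only returns values stored
as Strings, so anything stored with put as a non-String is silently invisible.)

    import java.util.Properties;

    public class SetPropertyVsPut {
      public static void main(String[] args) {
        Properties props = new Properties();

        // put() accepts any Object, but getProperty() only returns String values,
        // so an Integer stored this way is effectively lost.
        props.put("socket-buffer-size", 32768);
        System.out.println(props.getProperty("socket-buffer-size")); // prints: null

        // setProperty() is String-only, so the value is always retrievable.
        props.setProperty("socket-buffer-size", String.valueOf(32768));
        System.out.println(props.getProperty("socket-buffer-size")); // prints: 32768
      }
    }
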
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/CacheServerSSLConnectionDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
index 4a2d718..6d8d5e2 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
@@ -42,11 +42,15 @@ import static org.apache.geode.distributed.ConfigurationProperties.SSL_PROTOCOLS
 import static org.apache.geode.distributed.ConfigurationProperties.SSL_REQUIRE_AUTHENTICATION;
 import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE;
 import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE_PASSWORD;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.apache.geode.test.dunit.VM.getHostName;
+import static org.apache.geode.test.dunit.VM.getVM;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
@@ -61,11 +65,19 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
+import javax.net.ssl.SSLException;
+import javax.net.ssl.SSLHandshakeException;
+
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -82,12 +94,11 @@ import org.apache.geode.distributed.Locator;
 import org.apache.geode.internal.security.SecurableCommunicationChannel;
 import org.apache.geode.security.AuthenticationRequiredException;
 import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.RMIException;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
 
@@ -97,24 +108,10 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
  */
 @Category({ClientServerTest.class})
 @RunWith(Parameterized.class)
-@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+@SuppressWarnings("serial")
 public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase {
 
-  private static boolean useOldSSLSettings;
-
-  @Parameterized.Parameters
-  public static Collection<Boolean> data() {
-    List<Boolean> result = new ArrayList<>();
-    result.add(Boolean.TRUE);
-    result.add(Boolean.FALSE);
-    return result;
-  }
-
-  public CacheServerSSLConnectionDUnitTest(Boolean useOldSSLSettings) {
-    super();
-    CacheServerSSLConnectionDUnitTest.useOldSSLSettings = useOldSSLSettings.booleanValue();
-  }
-
   private static final String TRUSTED_STORE = "trusted.keystore";
   private static final String CLIENT_KEY_STORE = "default.keystore";
   private static final String CLIENT_TRUST_STORE = "default.keystore";
@@ -129,17 +126,27 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
   private int cacheServerPort;
   private String hostName;
 
-  @Override
-  public final void preSetUp() throws Exception {
-    disconnectAllFromDS();
-    instance = this;
-    Invoke
-        .invokeInEveryVM(() -> instance = new CacheServerSSLConnectionDUnitTest(useOldSSLSettings));
+  private static boolean useOldSSLSettings;
+
+  @Parameters
+  public static Collection<Boolean> data() {
+    List<Boolean> result = new ArrayList<>();
+    result.add(Boolean.TRUE);
+    result.add(Boolean.FALSE);
+    return result;
+  }
+
+  public CacheServerSSLConnectionDUnitTest(Boolean useOldSSLSettings) {
+    CacheServerSSLConnectionDUnitTest.useOldSSLSettings = useOldSSLSettings;
   }
 
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
+
   @AfterClass
   public static void postClass() {
-    Invoke.invokeInEveryVM(() -> {
+    invokeInEveryVM(() -> {
       if (instance.cache != null) {
         instance.cache.close();
       }
@@ -151,7 +158,23 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     instance = null;
   }
 
-  public Cache createCache(Properties props) throws Exception {
+  @Before
+  public void setUp() {
+    disconnectAllFromDS();
+    instance = this;
+    invokeInEveryVM(() -> instance = new CacheServerSSLConnectionDUnitTest(useOldSSLSettings));
+  }
+
+  @After
+  public void tearDown() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
+
+    clientVM.invoke(() -> closeClientCacheTask());
+    serverVM.invoke(() -> closeCacheTask());
+  }
+
+  private Cache createCache(Properties props) throws Exception {
     props.setProperty(MCAST_PORT, "0");
     cache = new CacheFactory(props).create();
     if (cache == null) {
@@ -169,22 +192,21 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     return cacheServerPort;
   }
 
-  public int getCacheServerPort() {
+  private int getCacheServerPort() {
     return cacheServerPort;
   }
 
-  public String getCacheServerHost() {
+  private String getCacheServerHost() {
     return hostName;
   }
 
-  @SuppressWarnings("rawtypes")
-  public void setUpServerVM(final boolean cacheServerSslenabled, int optionalLocatorPort)
+  private void setUpServerVM(final boolean cacheServerSslenabled, int optionalLocatorPort)
       throws Exception {
     System.setProperty("javax.net.debug", "ssl,handshake");
 
     Properties gemFireProps = new Properties();
     if (optionalLocatorPort > 0) {
-      gemFireProps.put("locators", "localhost[" + optionalLocatorPort + "]");
+      gemFireProps.setProperty("locators", "localhost[" + optionalLocatorPort + "]");
     }
 
     String cacheServerSslprotocols = "any";
@@ -194,10 +216,10 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
       getNewSSLSettings(gemFireProps, cacheServerSslprotocols, cacheServerSslciphers,
           cacheServerSslRequireAuth);
     } else {
-      gemFireProps.put(CLUSTER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
-      gemFireProps.put(CLUSTER_SSL_PROTOCOLS, cacheServerSslprotocols);
-      gemFireProps.put(CLUSTER_SSL_CIPHERS, cacheServerSslciphers);
-      gemFireProps.put(CLUSTER_SSL_REQUIRE_AUTHENTICATION,
+      gemFireProps.setProperty(CLUSTER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
+      gemFireProps.setProperty(CLUSTER_SSL_PROTOCOLS, cacheServerSslprotocols);
+      gemFireProps.setProperty(CLUSTER_SSL_CIPHERS, cacheServerSslciphers);
+      gemFireProps.setProperty(CLUSTER_SSL_REQUIRE_AUTHENTICATION,
           String.valueOf(cacheServerSslRequireAuth));
 
       String keyStore =
@@ -206,11 +228,11 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
       String trustStore =
           createTempFileFromResource(CacheServerSSLConnectionDUnitTest.class,
               SERVER_TRUST_STORE).getAbsolutePath();
-      gemFireProps.put(CLUSTER_SSL_KEYSTORE_TYPE, "jks");
-      gemFireProps.put(CLUSTER_SSL_KEYSTORE, keyStore);
-      gemFireProps.put(CLUSTER_SSL_KEYSTORE_PASSWORD, "password");
-      gemFireProps.put(CLUSTER_SSL_TRUSTSTORE, trustStore);
-      gemFireProps.put(CLUSTER_SSL_TRUSTSTORE_PASSWORD, "password");
+      gemFireProps.setProperty(CLUSTER_SSL_KEYSTORE_TYPE, "jks");
+      gemFireProps.setProperty(CLUSTER_SSL_KEYSTORE, keyStore);
+      gemFireProps.setProperty(CLUSTER_SSL_KEYSTORE_PASSWORD, "password");
+      gemFireProps.setProperty(CLUSTER_SSL_TRUSTSTORE, trustStore);
+      gemFireProps.setProperty(CLUSTER_SSL_TRUSTSTORE_PASSWORD, "password");
     }
     StringWriter sw = new StringWriter();
     PrintWriter writer = new PrintWriter(sw);
@@ -225,11 +247,11 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
 
   private void getNewSSLSettings(Properties gemFireProps, String cacheServerSslprotocols,
       String cacheServerSslciphers, boolean cacheServerSslRequireAuth) {
-    gemFireProps.put(SSL_ENABLED_COMPONENTS,
+    gemFireProps.setProperty(SSL_ENABLED_COMPONENTS,
         SecurableCommunicationChannel.CLUSTER + "," + SecurableCommunicationChannel.SERVER);
-    gemFireProps.put(SSL_PROTOCOLS, cacheServerSslprotocols);
-    gemFireProps.put(SSL_CIPHERS, cacheServerSslciphers);
-    gemFireProps.put(SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
+    gemFireProps.setProperty(SSL_PROTOCOLS, cacheServerSslprotocols);
+    gemFireProps.setProperty(SSL_CIPHERS, cacheServerSslciphers);
+    gemFireProps.setProperty(SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
 
     String keyStore =
         createTempFileFromResource(CacheServerSSLConnectionDUnitTest.class, SERVER_KEY_STORE)
@@ -237,17 +259,16 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     String trustStore =
         createTempFileFromResource(CacheServerSSLConnectionDUnitTest.class, SERVER_TRUST_STORE)
             .getAbsolutePath();
-    gemFireProps.put(SSL_KEYSTORE_TYPE, "jks");
-    gemFireProps.put(SSL_KEYSTORE, keyStore);
-    gemFireProps.put(SSL_KEYSTORE_PASSWORD, "password");
-    gemFireProps.put(SSL_TRUSTSTORE, trustStore);
-    gemFireProps.put(SSL_TRUSTSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SSL_KEYSTORE_TYPE, "jks");
+    gemFireProps.setProperty(SSL_KEYSTORE, keyStore);
+    gemFireProps.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SSL_TRUSTSTORE, trustStore);
+    gemFireProps.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
   }
 
-  public void setUpClientVM(String host, int port, boolean cacheServerSslenabled,
+  private void setUpClientVM(String host, int port, boolean cacheServerSslenabled,
       boolean cacheServerSslRequireAuth, String keyStore, String trustStore, boolean subscription,
       boolean clientHasTrustedKeystore) {
-
     System.setProperty("javax.net.debug", "ssl,handshake");
     Properties gemFireProps = new Properties();
 
@@ -263,46 +284,46 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
 
     if (cacheServerSslenabled) {
       if (useOldSSLSettings) {
-        gemFireProps.put(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
-        gemFireProps.put(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
-        gemFireProps.put(SERVER_SSL_CIPHERS, cacheServerSslciphers);
-        gemFireProps.put(SERVER_SSL_REQUIRE_AUTHENTICATION,
+        gemFireProps.setProperty(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
+        gemFireProps.setProperty(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
+        gemFireProps.setProperty(SERVER_SSL_CIPHERS, cacheServerSslciphers);
+        gemFireProps.setProperty(SERVER_SSL_REQUIRE_AUTHENTICATION,
             String.valueOf(cacheServerSslRequireAuth));
         if (clientHasTrustedKeystore) {
-          gemFireProps.put(SERVER_SSL_KEYSTORE_TYPE, "jks");
-          gemFireProps.put(SERVER_SSL_KEYSTORE, keyStorePath);
-          gemFireProps.put(SERVER_SSL_KEYSTORE_PASSWORD, "password");
-          gemFireProps.put(SERVER_SSL_TRUSTSTORE, trustStorePath);
-          gemFireProps.put(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE_TYPE, "jks");
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE, keyStorePath);
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE, trustStorePath);
+          gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
         } else {
-          gemFireProps.put(SERVER_SSL_KEYSTORE_TYPE, "jks");
-          gemFireProps.put(SERVER_SSL_KEYSTORE, "");
-          gemFireProps.put(SERVER_SSL_KEYSTORE_PASSWORD, "password");
-          gemFireProps.put(SERVER_SSL_TRUSTSTORE, trustStorePath);
-          gemFireProps.put(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE_TYPE, "jks");
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE, "");
+          gemFireProps.setProperty(SERVER_SSL_KEYSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE, trustStorePath);
+          gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
         }
       } else {
-        gemFireProps.put(SSL_ENABLED_COMPONENTS, "server");
-        gemFireProps.put(SSL_CIPHERS, cacheServerSslciphers);
-        gemFireProps.put(SSL_PROTOCOLS, cacheServerSslprotocols);
-        gemFireProps.put(SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
+        gemFireProps.setProperty(SSL_ENABLED_COMPONENTS, "server");
+        gemFireProps.setProperty(SSL_CIPHERS, cacheServerSslciphers);
+        gemFireProps.setProperty(SSL_PROTOCOLS, cacheServerSslprotocols);
+        gemFireProps
+            .setProperty(SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
         if (clientHasTrustedKeystore) {
-          gemFireProps.put(SSL_KEYSTORE_TYPE, "jks");
-          gemFireProps.put(SSL_KEYSTORE, keyStorePath);
-          gemFireProps.put(SSL_KEYSTORE_PASSWORD, "password");
-          gemFireProps.put(SSL_TRUSTSTORE, trustStorePath);
-          gemFireProps.put(SSL_TRUSTSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SSL_KEYSTORE_TYPE, "jks");
+          gemFireProps.setProperty(SSL_KEYSTORE, keyStorePath);
+          gemFireProps.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SSL_TRUSTSTORE, trustStorePath);
+          gemFireProps.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
         } else {
-          gemFireProps.put(SSL_KEYSTORE_TYPE, "jks");
-          gemFireProps.put(SSL_KEYSTORE, "");
-          gemFireProps.put(SSL_KEYSTORE_PASSWORD, "password");
-          gemFireProps.put(SSL_TRUSTSTORE, trustStorePath);
-          gemFireProps.put(SSL_TRUSTSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SSL_KEYSTORE_TYPE, "jks");
+          gemFireProps.setProperty(SSL_KEYSTORE, "");
+          gemFireProps.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+          gemFireProps.setProperty(SSL_TRUSTSTORE, trustStorePath);
+          gemFireProps.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
         }
       }
     }
 
-
     StringWriter sw = new StringWriter();
     PrintWriter writer = new PrintWriter(sw);
     gemFireProps.list(writer);
@@ -319,48 +340,48 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     assertNotNull(region);
   }
 
-  public void doClientRegionTest() {
+  private void doClientRegionTest() {
     Region<String, String> region = clientCache.getRegion("serverRegion");
     assertEquals("servervalue", region.get("serverkey"));
     region.put("clientkey", "clientvalue");
     assertEquals("clientvalue", region.get("clientkey"));
   }
 
-  public void doServerRegionTest() {
+  private void doServerRegionTest() {
     Region<String, String> region = cache.getRegion("serverRegion");
     assertEquals("servervalue", region.get("serverkey"));
     assertEquals("clientvalue", region.get("clientkey"));
   }
 
 
-  public static void setUpServerVMTask(boolean cacheServerSslenabled, int optionalLocatorPort)
+  private static void setUpServerVMTask(boolean cacheServerSslenabled, int optionalLocatorPort)
       throws Exception {
     instance.setUpServerVM(cacheServerSslenabled, optionalLocatorPort);
   }
 
-  public static int createServerTask() throws Exception {
+  private static int createServerTask() throws Exception {
     return instance.createServer();
   }
 
-  public static void setUpClientVMTask(String host, int port, boolean cacheServerSslenabled,
+  private static void setUpClientVMTask(String host, int port, boolean cacheServerSslenabled,
       boolean cacheServerSslRequireAuth, String keyStore, String trustStore,
-      boolean clientHasTrustedKeystore) throws Exception {
+      boolean clientHasTrustedKeystore) {
     instance.setUpClientVM(host, port, cacheServerSslenabled, cacheServerSslRequireAuth, keyStore,
         trustStore, true, clientHasTrustedKeystore);
   }
 
-  public static void setUpClientVMTaskNoSubscription(String host, int port,
+  private static void setUpClientVMTaskNoSubscription(String host, int port,
       boolean cacheServerSslenabled, boolean cacheServerSslRequireAuth, String keyStore,
-      String trustStore) throws Exception {
+      String trustStore) {
     instance.setUpClientVM(host, port, cacheServerSslenabled, cacheServerSslRequireAuth, keyStore,
         trustStore, false, true);
   }
 
-  public static void doClientRegionTestTask() {
+  private static void doClientRegionTestTask() {
     instance.doClientRegionTest();
   }
 
-  public static void verifyServerDoesNotReceiveClientUpdate() {
+  private static void verifyServerDoesNotReceiveClientUpdate() {
     instance.doVerifyServerDoesNotReceiveClientUpdate();
   }
 
@@ -369,24 +390,24 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     assertFalse(region.containsKey("clientkey"));
   }
 
-  public static void doServerRegionTestTask() {
+  private static void doServerRegionTestTask() {
     instance.doServerRegionTest();
   }
 
-  public static Object[] getCacheServerEndPointTask() { // TODO: avoid Object[]
+  private static Object[] getCacheServerEndPointTask() { // TODO: avoid Object[]
     Object[] array = new Object[2];
     array[0] = instance.getCacheServerHost();
     array[1] = instance.getCacheServerPort();
     return array;
   }
 
-  public static void closeCacheTask() {
+  private static void closeCacheTask() {
     if (instance != null && instance.cache != null) {
       instance.cache.close();
     }
   }
 
-  public static void closeClientCacheTask() {
+  private static void closeClientCacheTask() {
     if (instance != null && instance.clientCache != null) {
       instance.clientCache.close();
     }
@@ -394,10 +415,9 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
 
   @Test
   public void testCacheServerSSL() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
-    VM serverVM2 = host.getVM(3);
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
+    VM serverVM2 = getVM(3);
 
     boolean cacheServerSslenabled = true;
     boolean cacheClientSslenabled = true;
@@ -417,7 +437,7 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
       serverVM2.invoke(() -> setUpServerVMTask(cacheServerSslenabled, locatorPort));
       serverVM2.invoke(() -> createServerTask());
 
-      String hostName = host.getHostName();
+      String hostName = getHostName();
 
       clientVM.invoke(() -> setUpClientVMTask(hostName, port, cacheClientSslenabled,
           cacheClientSslRequireAuth, CLIENT_KEY_STORE, CLIENT_TRUST_STORE, true));
@@ -430,25 +450,25 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
 
   /**
    * GEODE-2898: A non-responsive SSL client can block a server's "acceptor" thread
+   *
    * <p>
    * Start a server and then connect to it without completing the SSL handshake
-   * </p>
+   *
    * <p>
    * Attempt to connect to the server using a real SSL client, demonstrating that the server is not
    * blocked and can process the new connection request.
-   * </p>
    */
   @Test
   public void clientSlowToHandshakeDoesNotBlockServer() throws Throwable {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
-    VM slowClientVM = host.getVM(3);
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
+    VM slowClientVM = getVM(3);
+
     getBlackboard().initBlackboard();
 
     // a plain-text socket is used to connect to an ssl server & the handshake
     // is never performed. The server will log this exception & it should be ignored
-    IgnoredException.addIgnoredException("javax.net.ssl.SSLHandshakeException", serverVM);
+    addIgnoredException(SSLHandshakeException.class);
 
     boolean cacheServerSslenabled = true;
     boolean cacheClientSslenabled = true;
@@ -457,7 +477,7 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled, 0));
     int port = serverVM.invoke(() -> createServerTask());
 
-    String hostName = host.getHostName();
+    String hostName = getHostName();
 
     AsyncInvocation slowAsync = slowClientVM.invokeAsync(() -> connectToServer(hostName, port));
     try {
@@ -470,18 +490,8 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
 
     } finally {
       getBlackboard().signalGate("testIsCompleted");
-      try {
-        if (slowAsync.isAlive()) {
-          slowAsync.join(60000);
-        }
-        if (slowAsync.exceptionOccurred()) {
-          throw slowAsync.getException();
-        }
-      } finally {
-        assertFalse(slowAsync.isAlive());
-      }
+      slowAsync.await();
     }
-
   }
 
   private void connectToServer(String hostName, int port) throws Exception {
@@ -496,10 +506,9 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
   }
 
   @Test
-  public void testNonSSLClient() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
+  public void testNonSSLClient() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
 
     boolean cacheServerSslenabled = true;
     boolean cacheClientSslenabled = false;
@@ -508,14 +517,12 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled, 0));
     serverVM.invoke(() -> createServerTask());
 
-    Object array[] = (Object[]) serverVM.invoke(() -> getCacheServerEndPointTask());
+    Object array[] = serverVM.invoke(() -> getCacheServerEndPointTask());
     String hostName = (String) array[0];
     int port = (Integer) array[1];
 
-    IgnoredException expect =
-        IgnoredException.addIgnoredException("javax.net.ssl.SSLException", serverVM);
-    IgnoredException expect2 = IgnoredException.addIgnoredException("IOException", serverVM);
-    try {
+    try (IgnoredException i1 = addIgnoredException(SSLException.class);
+        IgnoredException i2 = addIgnoredException(IOException.class)) {
       clientVM.invoke(() -> setUpClientVMTaskNoSubscription(hostName, port, cacheClientSslenabled,
           cacheClientSslRequireAuth, TRUSTED_STORE, TRUSTED_STORE));
       clientVM.invoke(() -> doClientRegionTestTask());
@@ -523,53 +530,39 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
       fail("Test should fail as non-ssl client is trying to connect to ssl configured server");
 
     } catch (Exception rmiException) {
-      Throwable e = rmiException.getCause();
-      // getLogWriter().info("ExceptionCause at clientVM " + e);
-      if (e instanceof org.apache.geode.cache.client.ServerOperationException) {
-        Throwable t = e.getCause();
-        // getLogWriter().info("Cause is " + t);
-        assertTrue(t instanceof org.apache.geode.security.AuthenticationRequiredException);
-      } else {
-        // getLogWriter().error("Unexpected exception ", e);
-        fail("Unexpected Exception: " + e + " expected: " + AuthenticationRequiredException.class);
-      }
-    } finally {
-      expect.remove();
-      expect2.remove();
+      assertThat(rmiException).hasRootCauseInstanceOf(AuthenticationRequiredException.class);
     }
   }
 
   @Test
-  public void testSSLClientWithNoAuth() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
+  public void testSSLClientWithNoAuth() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
 
     boolean cacheServerSslenabled = true;
     boolean cacheClientSslenabled = true;
     boolean cacheClientSslRequireAuth = false;
 
-    IgnoredException.addIgnoredException("SSLHandshakeException");
-    IgnoredException.addIgnoredException("ValidatorException");
+    addIgnoredException("SSLHandshakeException");
+    addIgnoredException("ValidatorException");
 
     serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled, 0));
     serverVM.invoke(() -> createServerTask());
 
-    Object array[] = (Object[]) serverVM.invoke(() -> getCacheServerEndPointTask());
+    Object array[] = serverVM.invoke(() -> getCacheServerEndPointTask());
     String hostName = (String) array[0];
     int port = (Integer) array[1];
 
     clientVM.invoke(() -> setUpClientVMTask(hostName, port, cacheClientSslenabled,
         cacheClientSslRequireAuth, CLIENT_KEY_STORE, CLIENT_TRUST_STORE, true));
-    clientVM.invoke(() -> CacheServerSSLConnectionDUnitTest.doClientRegionTestTask());
-    serverVM.invoke(() -> CacheServerSSLConnectionDUnitTest.doServerRegionTestTask());
+    clientVM.invoke(() -> doClientRegionTestTask());
+    serverVM.invoke(() -> doServerRegionTestTask());
   }
 
   @Test
-  public void untrustedClientIsRejected() throws Throwable {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
+  public void untrustedClientIsRejected() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
 
     boolean cacheServerSslenabled = true;
     boolean cacheClientSslenabled = true;
@@ -578,31 +571,28 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled, 0));
     serverVM.invoke(() -> createServerTask());
 
-    Object array[] = (Object[]) serverVM.invoke(() -> getCacheServerEndPointTask());
+    Object array[] = serverVM.invoke(() -> getCacheServerEndPointTask());
     String hostName = (String) array[0];
     int port = (Integer) array[1];
 
-    IgnoredException.addIgnoredException("SSLHandshakeException");
+    addIgnoredException("SSLHandshakeException");
 
     clientVM.invoke(() -> setUpClientVMTask(hostName, port, cacheClientSslenabled,
         cacheClientSslRequireAuth, "default.keystore", CLIENT_TRUST_STORE, false));
 
     try {
-      clientVM.invoke(() -> CacheServerSSLConnectionDUnitTest.doClientRegionTestTask());
+      clientVM.invoke(() -> doClientRegionTestTask());
       fail("client should not have been able to execute a cache operation");
     } catch (RMIException e) {
-      assertTrue("expected a NoAvailableServersException but received " + e.getCause(),
-          e.getCause() instanceof NoAvailableServersException);
+      assertThat(e).hasRootCauseInstanceOf(NoAvailableServersException.class);
     }
-    serverVM
-        .invoke(() -> CacheServerSSLConnectionDUnitTest.verifyServerDoesNotReceiveClientUpdate());
+    serverVM.invoke(() -> verifyServerDoesNotReceiveClientUpdate());
   }
 
   @Test
-  public void testSSLClientWithNonSSLServer() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
+  public void testSSLClientWithNonSSLServer() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
 
     boolean cacheServerSslenabled = false;
     boolean cacheClientSslenabled = true;
@@ -611,13 +601,11 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
     serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled, 0));
     serverVM.invoke(() -> createServerTask());
 
-    Object array[] = (Object[]) serverVM.invoke(() -> getCacheServerEndPointTask());
+    Object array[] = serverVM.invoke(() -> getCacheServerEndPointTask());
     String hostName = (String) array[0];
     int port = (Integer) array[1];
 
-    IgnoredException expect =
-        IgnoredException.addIgnoredException("javax.net.ssl.SSLHandshakeException", serverVM);
-    try {
+    try (IgnoredException i = addIgnoredException(SSLHandshakeException.class)) {
       clientVM.invoke(() -> setUpClientVMTask(hostName, port, cacheClientSslenabled,
           cacheClientSslRequireAuth, TRUSTED_STORE, TRUSTED_STORE, true));
       clientVM.invoke(() -> doClientRegionTestTask());
@@ -625,20 +613,9 @@ public class CacheServerSSLConnectionDUnitTest extends JUnit4DistributedTestCase
       fail(
           "Test should fail as ssl client with ssl enabled is trying to connect to server with ssl disabled");
 
-    } catch (Exception rmiException) {
+    } catch (Exception e) {
      // expected: the SSL client cannot reach the non-SSL server; verify the reason
-    } finally {
-      expect.remove();
+      assertThat(e).hasRootCauseInstanceOf(NoAvailableServersException.class);
     }
   }
-
-  @Override
-  public final void preTearDown() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
-    clientVM.invoke(() -> closeClientCacheTask());
-    serverVM.invoke(() -> closeCacheTask());
-
-  }
 }
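
(The rewritten tests above scope ignored exceptions with try-with-resources instead of pairing
addIgnoredException with expect.remove() in a finally block. A minimal sketch of that idiom with
a hypothetical stand-in resource; ScopedIgnore is illustrative only, not the Geode
IgnoredException class.)

    public class TryWithResourcesSketch {

      // Hypothetical stand-in for a registration that must be undone when the block exits.
      static class ScopedIgnore implements AutoCloseable {
        private final String pattern;

        ScopedIgnore(String pattern) {
          this.pattern = pattern;
          System.out.println("ignoring: " + pattern);
        }

        @Override
        public void close() {
          System.out.println("no longer ignoring: " + pattern);
        }
      }

      public static void main(String[] args) {
        // close() runs automatically even if the body throws, replacing the
        // explicit remove() calls that used to live in finally blocks.
        try (ScopedIgnore ignored = new ScopedIgnore("SSLHandshakeException")) {
          System.out.println("run the client/server interaction that may log the exception");
        }
      }
    }
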
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/SSLNoClientAuthDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/SSLNoClientAuthDUnitTest.java
index 6989849..54f1aa3 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/SSLNoClientAuthDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/SSLNoClientAuthDUnitTest.java
@@ -25,16 +25,18 @@ import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_PR
 import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_REQUIRE_AUTHENTICATION;
 import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_TRUSTSTORE;
 import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_TRUSTSTORE_PASSWORD;
+import static org.apache.geode.test.dunit.VM.getVM;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.Properties;
 
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -49,15 +51,11 @@ import org.apache.geode.cache.client.ClientRegionFactory;
 import org.apache.geode.cache.client.ClientRegionShortcut;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.MembershipTest;
 
-/**
- * Test for GEODE-396
- */
-@Category({MembershipTest.class})
+@Category(MembershipTest.class)
 public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
 
   private Cache cache;
@@ -70,19 +68,56 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
 
   private static SSLNoClientAuthDUnitTest instance = new SSLNoClientAuthDUnitTest();
 
-  @Override
-  public final void preSetUp() throws Exception {
+  @Before
+  public void setUp() {
     disconnectAllFromDS();
   }
 
-  public Cache createCache(Properties props) throws Exception {
+  @After
+  public void tearDown() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
+
+    clientVM.invoke(() -> closeClientCacheTask());
+    serverVM.invoke(() -> closeCacheTask());
+  }
+
+  @Test
+  public void testSSLServerWithNoAuth() {
+    VM serverVM = getVM(1);
+    VM clientVM = getVM(2);
+
+    boolean cacheServerSslenabled = true;
+    boolean cacheClientSslenabled = true;
+    boolean cacheClientSslRequireAuth = true;
+
+    serverVM.invoke(() -> setUpServerVMTask(cacheServerSslenabled));
+    serverVM.invoke(() -> createServerTask());
+
+    Object array[] = serverVM.invoke(() -> getCacheServerEndPointTask());
+    String hostName = (String) array[0];
+    int port = (Integer) array[1];
+
+    clientVM.invoke(() -> setUpClientVMTask(hostName, port,
+        cacheClientSslenabled, cacheClientSslRequireAuth, DEFAULT_STORE, DEFAULT_STORE));
+    clientVM.invoke(() -> doClientRegionTestTask());
+    serverVM.invoke(() -> doServerRegionTestTask());
+  }
+
+  private void createCache(Properties props) throws Exception {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
     cache = new CacheFactory(props).create();
     if (cache == null) {
       throw new Exception("CacheFactory.create() returned null ");
     }
-    return cache;
   }
 
   private void createServer() throws IOException {
@@ -93,30 +128,26 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
     hostName = cacheServer.getHostnameForClients();
   }
 
-  public int getCacheServerPort() {
+  private int getCacheServerPort() {
     return cacheServerPort;
   }
 
-  public String getCacheServerHost() {
+  private String getCacheServerHost() {
     return hostName;
   }
 
-  public void stopCacheServer() {
-    this.cacheServer.stop();
-  }
-
-
-  @SuppressWarnings("rawtypes")
-  public void setUpServerVM(boolean cacheServerSslenabled) throws Exception {
+  private void setUpServerVM(boolean cacheServerSslenabled) throws Exception {
     Properties gemFireProps = new Properties();
 
     String cacheServerSslprotocols = "any";
     String cacheServerSslciphers = "any";
     boolean cacheServerSslRequireAuth = false;
-    gemFireProps.put(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
-    gemFireProps.put(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
-    gemFireProps.put(SERVER_SSL_CIPHERS, cacheServerSslciphers);
-    gemFireProps.put(SERVER_SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
+
+    gemFireProps.setProperty(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
+    gemFireProps.setProperty(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
+    gemFireProps.setProperty(SERVER_SSL_CIPHERS, cacheServerSslciphers);
+    gemFireProps
+        .setProperty(SERVER_SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
 
     String keyStore =
         createTempFileFromResource(SSLNoClientAuthDUnitTest.class, DEFAULT_STORE)
@@ -124,16 +155,15 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
     String trustStore =
         createTempFileFromResource(SSLNoClientAuthDUnitTest.class, DEFAULT_STORE)
             .getAbsolutePath();
-    gemFireProps.put(SERVER_SSL_KEYSTORE_TYPE, "jks");
-    gemFireProps.put(SERVER_SSL_KEYSTORE, keyStore);
-    gemFireProps.put(SERVER_SSL_KEYSTORE_PASSWORD, "password");
-    gemFireProps.put(SERVER_SSL_TRUSTSTORE, trustStore);
-    gemFireProps.put(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE_TYPE, "jks");
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE, keyStore);
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE, trustStore);
+    gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
 
     StringWriter sw = new StringWriter();
     PrintWriter writer = new PrintWriter(sw);
     gemFireProps.list(writer);
-    System.out.println("Starting cacheserver ds with following properties \n" + sw);
     createCache(gemFireProps);
 
     RegionFactory factory = cache.createRegionFactory(RegionShortcut.REPLICATE);
@@ -141,9 +171,8 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
     r.put("serverkey", "servervalue");
   }
 
-  public void setUpClientVM(String host, int port, boolean cacheServerSslenabled,
+  private void setUpClientVM(String host, int port, boolean cacheServerSslenabled,
       boolean cacheServerSslRequireAuth, String keyStore, String trustStore) {
-
     Properties gemFireProps = new Properties();
 
     String cacheServerSslprotocols = "any";
@@ -156,21 +185,21 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
         createTempFileFromResource(SSLNoClientAuthDUnitTest.class, trustStore)
             .getAbsolutePath();
     // using new server-ssl-* properties
-    gemFireProps.put(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
-    gemFireProps.put(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
-    gemFireProps.put(SERVER_SSL_CIPHERS, cacheServerSslciphers);
-    gemFireProps.put(SERVER_SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
-
-    gemFireProps.put(SERVER_SSL_KEYSTORE_TYPE, "jks");
-    gemFireProps.put(SERVER_SSL_KEYSTORE, keyStorePath);
-    gemFireProps.put(SERVER_SSL_KEYSTORE_PASSWORD, "password");
-    gemFireProps.put(SERVER_SSL_TRUSTSTORE, trustStorePath);
-    gemFireProps.put(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
+    gemFireProps.setProperty(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
+    gemFireProps.setProperty(SERVER_SSL_CIPHERS, cacheServerSslciphers);
+    gemFireProps
+        .setProperty(SERVER_SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
+
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE_TYPE, "jks");
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE, keyStorePath);
+    gemFireProps.setProperty(SERVER_SSL_KEYSTORE_PASSWORD, "password");
+    gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE, trustStorePath);
+    gemFireProps.setProperty(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
 
     StringWriter sw = new StringWriter();
     PrintWriter writer = new PrintWriter(sw);
     gemFireProps.list(writer);
-    System.out.println("Starting client ds with following properties \n" + sw.getBuffer());
 
     ClientCacheFactory clientCacheFactory = new ClientCacheFactory(gemFireProps);
     clientCacheFactory.addPoolServer(host, port);
@@ -182,107 +211,57 @@ public class SSLNoClientAuthDUnitTest extends JUnit4DistributedTestCase {
     assertNotNull(region);
   }
 
-  public void doClientRegionTest() {
+  private void doClientRegionTest() {
     Region<String, String> region = clientCache.getRegion("serverRegion");
     assertEquals("servervalue", region.get("serverkey"));
     region.put("clientkey", "clientvalue");
     assertEquals("clientvalue", region.get("clientkey"));
   }
 
-  public void doServerRegionTest() {
+  private void doServerRegionTest() {
     Region<String, String> region = cache.getRegion("serverRegion");
     assertEquals("servervalue", region.get("serverkey"));
     assertEquals("clientvalue", region.get("clientkey"));
   }
 
-
-  public static void setUpServerVMTask(boolean cacheServerSslenabled) throws Exception {
+  private static void setUpServerVMTask(boolean cacheServerSslenabled) throws Exception {
     instance.setUpServerVM(cacheServerSslenabled);
   }
 
-  public static void createServerTask() throws Exception {
+  private static void createServerTask() throws Exception {
     instance.createServer();
   }
 
-  public static void setUpClientVMTask(String host, int port, boolean cacheServerSslenabled,
-      boolean cacheServerSslRequireAuth, String keyStore, String trustStore) throws Exception {
+  private static void setUpClientVMTask(String host, int port, boolean cacheServerSslenabled,
+      boolean cacheServerSslRequireAuth, String keyStore, String trustStore) {
     instance.setUpClientVM(host, port, cacheServerSslenabled, cacheServerSslRequireAuth, keyStore,
         trustStore);
   }
 
-  public static void doClientRegionTestTask() {
+  private static void doClientRegionTestTask() {
     instance.doClientRegionTest();
   }
 
-  public static void doServerRegionTestTask() {
+  private static void doServerRegionTestTask() {
     instance.doServerRegionTest();
   }
 
-  public static Object[] getCacheServerEndPointTask() {
+  private static Object[] getCacheServerEndPointTask() {
     Object[] array = new Object[2];
     array[0] = instance.getCacheServerHost();
     array[1] = instance.getCacheServerPort();
     return array;
   }
 
-  public static void closeCacheTask() {
+  private static void closeCacheTask() {
     if (instance != null && instance.cache != null) {
       instance.cache.close();
     }
   }
 
-  public static void closeClientCacheTask() {
+  private static void closeClientCacheTask() {
     if (instance != null && instance.clientCache != null) {
       instance.clientCache.close();
     }
   }
-
-  /**
-   * Test for GEODE-396
-   */
-  @Test
-  public void testSSLServerWithNoAuth() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
-
-    boolean cacheServerSslenabled = true;
-    boolean cacheClientSslenabled = true;
-    boolean cacheClientSslRequireAuth = true;
-
-    serverVM.invoke(() -> SSLNoClientAuthDUnitTest.setUpServerVMTask(cacheServerSslenabled));
-    serverVM.invoke(() -> SSLNoClientAuthDUnitTest.createServerTask());
-
-    Object array[] =
-        (Object[]) serverVM.invoke(() -> SSLNoClientAuthDUnitTest.getCacheServerEndPointTask());
-    String hostName = (String) array[0];
-    int port = (Integer) array[1];
-    Object params[] = new Object[6];
-    params[0] = hostName;
-    params[1] = port;
-    params[2] = cacheClientSslenabled;
-    params[3] = cacheClientSslRequireAuth;
-    params[4] = DEFAULT_STORE;
-    params[5] = DEFAULT_STORE;
-    // getLogWriter().info("Starting client with server endpoint " + hostName + ":" + port);
-    try {
-      clientVM.invoke(() -> SSLNoClientAuthDUnitTest.setUpClientVMTask(hostName, port,
-          cacheClientSslenabled, cacheClientSslRequireAuth, DEFAULT_STORE, DEFAULT_STORE));
-      clientVM.invoke(() -> SSLNoClientAuthDUnitTest.doClientRegionTestTask());
-      serverVM.invoke(() -> SSLNoClientAuthDUnitTest.doServerRegionTestTask());
-    } catch (Exception rmiException) {
-      Throwable e = rmiException.getCause();
-      // getLogWriter().info("ExceptionCause at clientVM " + e);
-      fail("Unexpected Exception " + e);
-    }
-  }
-
-  @Override
-  public final void preTearDown() throws Exception {
-    final Host host = Host.getHost(0);
-    VM serverVM = host.getVM(1);
-    VM clientVM = host.getVM(2);
-    clientVM.invoke(() -> SSLNoClientAuthDUnitTest.closeClientCacheTask());
-    serverVM.invoke(() -> SSLNoClientAuthDUnitTest.closeCacheTask());
-  }
 }
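
(Both SSL test classes drop the preSetUp/preTearDown overrides in favor of plain JUnit 4
lifecycle annotations. A minimal sketch of that lifecycle, assuming JUnit 4 on the classpath;
the class name LifecycleSketch is hypothetical.)

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LifecycleSketch {

      private StringBuilder events;

      @Before
      public void setUp() {
        // runs before each test method, in place of a framework-specific preSetUp() override
        events = new StringBuilder("setUp");
      }

      @Test
      public void setUpRanFirst() {
        assertEquals("setUp", events.toString());
      }

      @After
      public void tearDown() {
        // runs after each test method, even when the test fails, in place of preTearDown()
        events = null;
      }
    }
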
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PartitionedRegionCompactRangeIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PartitionedRegionCompactRangeIndexDUnitTest.java
index f0b91d3..afb9b5d 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PartitionedRegionCompactRangeIndexDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PartitionedRegionCompactRangeIndexDUnitTest.java
@@ -16,9 +16,9 @@ package org.apache.geode.cache.query.dunit;
 
 import static org.apache.geode.distributed.ConfigurationProperties.CACHE_XML_FILE;
 import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
 
 import java.io.Serializable;
 import java.util.Collection;
@@ -34,29 +34,30 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.query.IndexNameConflictException;
 import org.apache.geode.cache.query.Query;
 import org.apache.geode.cache.query.QueryService;
 import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.query.data.Portfolio;
 import org.apache.geode.test.dunit.DistributedTestUtils;
-import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.rules.ClusterStartupRule;
 import org.apache.geode.test.dunit.rules.MemberVM;
 import org.apache.geode.test.junit.categories.OQLQueryTest;
 
-@Category({OQLQueryTest.class})
+@Category(OQLQueryTest.class)
 public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable {
+
   @Rule
   public ClusterStartupRule clusterStartupRule = new ClusterStartupRule();
 
-  protected MemberVM locator;
+  private MemberVM locator;
 
-  Properties props;
+  private Properties props;
 
-  MemberVM server1;
-  MemberVM server2;
-  MemberVM server3;
+  private MemberVM server1;
+  private MemberVM server2;
+  private MemberVM server3;
 
   private Properties getSystemProperties(String cacheXML) {
     Properties props = new Properties();
@@ -67,22 +68,22 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   }
 
   @Before
-  public void before() throws Exception {
-    this.locator = this.clusterStartupRule.startLocatorVM(0);
+  public void before() {
+    locator = clusterStartupRule.startLocatorVM(0);
     props = getSystemProperties("PersistentPartitionWithIndex.xml");
 
-    server1 = this.clusterStartupRule.startServerVM(1, props, this.locator.getPort());
-    server2 = this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
-    server3 = this.clusterStartupRule.startServerVM(3, props, this.locator.getPort());
+    server1 = clusterStartupRule.startServerVM(1, props, locator.getPort());
+    server2 = clusterStartupRule.startServerVM(2, props, locator.getPort());
+    server3 = clusterStartupRule.startServerVM(3, props, locator.getPort());
 
     // Adding due to known race condition for creation of partitioned indexes via cache.xml
-    IgnoredException.addIgnoredException("IndexNameConflictException");
+    addIgnoredException(IndexNameConflictException.class);
   }
 
   @Test
-  public void testGIIUpdateWithIndexDoesNotDuplicateEntryInIndexWhenAlreadyRecoveredFromPersistence()
-      throws Exception {
-    String regionName = "persistentTestRegion"; // this region is created via cache.xml
+  public void testGIIUpdateWithIndexDoesNotDuplicateEntryInIndexWhenAlreadyRecoveredFromPersistence() {
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegion";
     String idQuery = "select * from /" + regionName + " p where p.ID = 1";
     int idQueryExpectedSize = 1;
     int numEntries = 100;
@@ -97,8 +98,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     // update entries
     server3.invoke(() -> populateRegion(regionName, entries));
     clusterStartupRule.stop(1, false);
-    server1 = this.clusterStartupRule.startServerVM(1, props, this.locator.getPort());
-    server2 = this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    server1 = clusterStartupRule.startServerVM(1, props, locator.getPort());
+    server2 = clusterStartupRule.startServerVM(2, props, locator.getPort());
 
     server3.invoke(verifyQueryResultsSize(idQuery, idQueryExpectedSize));
     server2.invoke(verifyQueryResultsSize(idQuery, idQueryExpectedSize));
@@ -108,8 +109,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   @Test
   public void giiWithPersistenceAndStaleDataDueToUpdatesShouldCorrectlyPopulateIndexes()
       throws Exception {
-    String regionName = "persistentTestRegionWithEntrySetIndex"; // this region is created via
-                                                                 // cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegionWithEntrySetIndex";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(10000 + i)));
@@ -125,12 +126,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -150,12 +151,11 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     });
   }
 
-
   @Test
   public void giiWithPersistenceAndStaleDataDueToSameUpdatesShouldCorrectlyPopulateIndexes()
       throws Exception {
-    String regionName = "persistentTestRegionWithEntrySetIndex"; // this region is created via
-    // cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegionWithEntrySetIndex";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(i)));
@@ -171,12 +171,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -196,12 +196,11 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     });
   }
 
-
   @Test
   public void giiWithPersistenceAndStaleDataDueToUpdatesShouldCorrectlyPopulateIndexesWithEntrySet()
       throws Exception {
-    String regionName = "persistentTestRegionWithEntrySetIndex"; // this region is created via
-                                                                 // cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegionWithEntrySetIndex";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(10000 + i)));
@@ -218,12 +217,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -246,8 +245,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   @Test
   public void giiWithPersistenceAndStaleDataDueToDeletesShouldProvideCorrectResultsWithEntrySet()
       throws Exception {
-    String regionName = "persistentTestRegionWithEntrySetIndex"; // this region is created via
-                                                                 // cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegionWithEntrySetIndex";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(i)));
@@ -260,12 +259,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -288,8 +287,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   @Test
   public void giiWithPersistenceAndStaleDataDueToDeletesShouldHaveEmptyIndexesWithEntrySet()
       throws Exception {
-    String regionName = "persistentTestRegionWithEntrySetIndex"; // this region is created via
-                                                                 // cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegionWithEntrySetIndex";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(i)));
@@ -302,12 +301,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -327,7 +326,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   @Test
   public void giiWithPersistenceAndStaleDataDueToDeletesShouldProvideCorrectResultsWithIndexes()
       throws Exception {
-    String regionName = "persistentTestRegion"; // this region is created via cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegion";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(i)));
@@ -339,12 +339,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -367,7 +367,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   @Test
   public void giiWithPersistenceAndStaleDataDueToDeletesShouldHaveEmptyIndexes()
       throws Exception {
-    String regionName = "persistentTestRegion"; // this region is created via cache.xml
+    // this region is created via cache.xml
+    String regionName = "persistentTestRegion";
     int numEntries = 100;
     Map<String, Portfolio> entries = new HashMap<>();
     IntStream.range(0, numEntries).forEach(i -> entries.put("key-" + i, new Portfolio(i)));
@@ -380,12 +381,12 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
     clusterStartupRule.stop(3, false);
 
     Thread t3 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(3, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(3, props, locator.getPort()));
     t3.start();
     Thread t1 =
-        new Thread(() -> this.clusterStartupRule.startServerVM(1, props, this.locator.getPort()));
+        new Thread(() -> clusterStartupRule.startServerVM(1, props, locator.getPort()));
     t1.start();
-    this.clusterStartupRule.startServerVM(2, props, this.locator.getPort());
+    clusterStartupRule.startServerVM(2, props, locator.getPort());
     t3.join();
     t1.join();
 
@@ -403,13 +404,13 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
   }
 
   private void verifyAllEntries(String query, Supplier<IntStream> idsSupplier, int numTimes,
-      int expectedSize) throws Exception {
+      int expectedSize) {
     for (int j = 0; j < numTimes; j++) {
       idsSupplier.get().forEach(i -> {
         try {
           verifyQueryResultsSize(query + i, expectedSize).run();
         } catch (Exception e) {
-          fail();
+          throw new RuntimeException(e);
         }
       });
     }
@@ -425,8 +426,8 @@ public class PartitionedRegionCompactRangeIndexDUnitTest implements Serializable
           SelectResults sr = (SelectResults) q.execute();
           assertEquals(expectedSize, sr.size());
         } catch (Exception e) {
-          e.printStackTrace();
-          fail("Exception occurred when executing verifyQueryResultsSize for query:" + query);
+          throw new RuntimeException(
+              "Exception occurred when executing verifyQueryResultsSize for query:" + query, e);
         }
       }
     };
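
For context on the idiom this last hunk settles on: the verification runnable rethrows query failures as a RuntimeException with the original cause attached instead of calling fail() inside the callback, so the controller VM still sees the full stack trace. A minimal standalone sketch of that idiom follows; the class and method names (QueryChecks, assertResultSize) are illustrative and not part of the patch.

import static org.junit.Assert.assertEquals;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;

// Illustrative helper only; not part of the patch above.
final class QueryChecks {

  static void assertResultSize(Cache cache, String queryString, int expectedSize) {
    try {
      Query query = cache.getQueryService().newQuery(queryString);
      SelectResults<?> results = (SelectResults<?>) query.execute();
      assertEquals(expectedSize, results.size());
    } catch (Exception e) {
      // Wrapping preserves the original stack trace, unlike a bare fail().
      throw new RuntimeException("Query failed: " + queryString, e);
    }
  }
}
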
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
index 8fa8308..570ccbf 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
@@ -76,6 +76,7 @@ import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.test.dunit.Assert;
+import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.SerializableRunnableIF;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
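
Most of the hunks that follow repeat a single refactor: the anonymous CacheSerializableRunnable is returned directly rather than being stored in a SerializableRunnable local and cast back on return. A minimal sketch of the resulting shape, assuming the usual dunit CacheSerializableRunnable contract (String-name constructor, run2() throwing CacheException); the class and factory names here are illustrative only.

import org.apache.geode.cache.CacheException;
import org.apache.geode.cache30.CacheSerializableRunnable;

// Illustrative only: the shape the helper methods below converge on.
final class RunnableShapes {

  static CacheSerializableRunnable regionCheckRunnable(String regionName) {
    // Return the anonymous runnable directly; no intermediate local or cast needed.
    return new CacheSerializableRunnable(regionName) {
      @Override
      public void run2() throws CacheException {
        // region creation / verification logic would go here
      }
    };
  }
}
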
@@ -83,10 +84,12 @@ import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 
 /**
  * This is a helper class for the various Partitioned Query DUnit Test Cases
+ *
+ * TODO: inline and then delete class PRQueryDUnitHelper
  */
 public class PRQueryDUnitHelper implements Serializable {
 
-  static Cache cache = null;
+  static Cache cache;
 
   public static void setCache(Cache cache) {
     PRQueryDUnitHelper.cache = cache;
@@ -96,10 +99,9 @@ public class PRQueryDUnitHelper implements Serializable {
     return cache;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionCreation(
+  CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionCreation(
       final String regionName, final Class constraint) {
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    return new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
         Cache cache = getCache();
@@ -110,7 +112,7 @@ public class PRQueryDUnitHelper implements Serializable {
           attr.setScope(Scope.LOCAL);
           localRegion = cache.createRegion(regionName, attr.create());
         } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
+          LogWriterUtils.getLogWriter().warning(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
               ex);
         }
@@ -126,29 +128,21 @@ public class PRQueryDUnitHelper implements Serializable {
             !localRegion.isDestroyed());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionWithAsyncIndexCreation(
+  CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionWithAsyncIndexCreation(
       final String regionName, final Class constraint) {
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    return new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
         Cache cache = getCache();
-        Region localRegion = null;
-        try {
-          AttributesFactory attr = new AttributesFactory();
-          attr.setValueConstraint(constraint);
-          attr.setScope(Scope.LOCAL);
-          attr.setIndexMaintenanceSynchronous(false);
-          localRegion = cache.createRegion(regionName, attr.create());
-        } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
-              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
-              ex);
-        }
+
+        AttributesFactory attr = new AttributesFactory();
+        attr.setValueConstraint(constraint);
+        attr.setScope(Scope.LOCAL);
+        attr.setIndexMaintenanceSynchronous(false);
+        Region localRegion = cache.createRegion(regionName, attr.create());
+
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Partitioned Region "
                 + regionName + " not in cache",
@@ -161,25 +155,17 @@ public class PRQueryDUnitHelper implements Serializable {
             !localRegion.isDestroyed());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
   public CacheSerializableRunnable getCacheSerializableRunnableForReplicatedRegionCreation(
       final String regionName) {
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    return new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
         Cache cache = getCache();
-        Region localRegion = null;
-        try {
-          localRegion = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
-        } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
-              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
-              ex);
-        }
+
+        Region localRegion = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
+
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Partitioned Region "
                 + regionName + " not in cache",
@@ -192,8 +178,6 @@ public class PRQueryDUnitHelper implements Serializable {
             !localRegion.isDestroyed());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
   /**
@@ -203,14 +187,10 @@ public class PRQueryDUnitHelper implements Serializable {
    */
   public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(final String regionName,
       final int redundancy, final Class constraint) {
-
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    return new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
-
         Cache cache = getCache();
-        Region partitionedregion = null;
         AttributesFactory attr = new AttributesFactory();
         attr.setValueConstraint(constraint);
 
@@ -219,7 +199,7 @@ public class PRQueryDUnitHelper implements Serializable {
 
         attr.setPartitionAttributes(prAttr);
 
-        partitionedregion = cache.createRegion(regionName, attr.create());
+        Region partitionedregion = cache.createRegion(regionName, attr.create());
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
                 + regionName + " not in cache",
@@ -232,8 +212,6 @@ public class PRQueryDUnitHelper implements Serializable {
             !partitionedregion.isDestroyed());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
   /**
@@ -242,20 +220,16 @@ public class PRQueryDUnitHelper implements Serializable {
    *
    * @return cacheSerializable object
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedPRCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForColocatedPRCreate(
       final String regionName, final int redundancy, final Class constraint,
       boolean makePersistent) {
+    String childRegionName = regionName + "Child";
+    String diskName = "disk";
 
-    final String childRegionName = regionName + "Child";
-    final String diskName = "disk";
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    return new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
-
         Cache cache = getCache();
-        Region partitionedregion = null;
-        Region childRegion = null;
         AttributesFactory attr = new AttributesFactory();
         attr.setValueConstraint(constraint);
         if (makePersistent) {
@@ -276,7 +250,7 @@ public class PRQueryDUnitHelper implements Serializable {
         attr.setPartitionAttributes(paf.create());
 
         // parent region
-        partitionedregion = cache.createRegion(regionName, attr.create());
+        Region partitionedregion = cache.createRegion(regionName, attr.create());
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
                 + regionName + " not in cache",
@@ -292,11 +266,10 @@ public class PRQueryDUnitHelper implements Serializable {
         attr.setValueConstraint(constraint);
         paf.setColocatedWith(regionName);
         attr.setPartitionAttributes(paf.create());
-        childRegion = cache.createRegion(childRegionName, attr.create());
+
+        cache.createRegion(childRegionName, attr.create());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
   /**
@@ -305,20 +278,15 @@ public class PRQueryDUnitHelper implements Serializable {
    *
    * @return cacheSerializable object
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedParentCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForColocatedParentCreate(
       final String regionName, final int redundancy, final Class constraint,
       boolean makePersistent) {
+    String diskName = "disk";
 
-    final String childRegionName = regionName + "Child";
-    final String diskName = "disk";
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName + "-NoChildRegion") {
+    return new CacheSerializableRunnable(regionName + "-NoChildRegion") {
       @Override
       public void run2() throws CacheException {
-
         Cache cache = getCache();
-        Region partitionedregion = null;
-        Region childRegion = null;
         AttributesFactory attr = new AttributesFactory();
         attr.setValueConstraint(constraint);
         if (makePersistent) {
@@ -339,7 +307,7 @@ public class PRQueryDUnitHelper implements Serializable {
         attr.setPartitionAttributes(paf.create());
 
         // parent region
-        partitionedregion = cache.createRegion(regionName, attr.create());
+        Region partitionedregion = cache.createRegion(regionName, attr.create());
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
                 + regionName + " not in cache",
@@ -352,8 +320,6 @@ public class PRQueryDUnitHelper implements Serializable {
             !partitionedregion.isDestroyed());
       }
     };
-
-    return (CacheSerializableRunnable) createPrRegion;
   }
 
   /**
@@ -362,60 +328,58 @@ public class PRQueryDUnitHelper implements Serializable {
    *
    * @return cacheSerializable object
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(
       final String regionName, final int redundancy, final Class constraint, boolean isPersistent) {
 
     final String childRegionName = regionName + "Child";
     final String diskName = "disk";
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName + "-ChildRegion") {
-      @Override
-      public void run2() throws CacheException {
+    SerializableRunnable createPrRegion =
+        new CacheSerializableRunnable(regionName + "-ChildRegion") {
+          @Override
+          public void run2() throws CacheException {
 
-        Cache cache = getCache();
-        Region partitionedregion = null;
-        Region childRegion = null;
-        AttributesFactory attr = new AttributesFactory();
-        attr.setValueConstraint(constraint);
-        if (isPersistent) {
-          DiskStore ds = cache.findDiskStore(diskName);
-          if (ds == null) {
-            // ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs())
-            ds = cache.createDiskStoreFactory()
-                .setDiskDirs(
-                    org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase.getDiskDirs())
-                .create(diskName);
-          }
-          attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
-          attr.setDiskStoreName(diskName);
-        } else {
-          attr.setDataPolicy(DataPolicy.PARTITION);
-          attr.setDiskStoreName(null);
-        }
+            Cache cache = getCache();
+            Region partitionedregion = null;
+            AttributesFactory attr = new AttributesFactory();
+            attr.setValueConstraint(constraint);
+            if (isPersistent) {
+              DiskStore ds = cache.findDiskStore(diskName);
+              if (ds == null) {
+                // ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs())
+                ds = cache.createDiskStoreFactory()
+                    .setDiskDirs(
+                        JUnit4CacheTestCase.getDiskDirs())
+                    .create(diskName);
+              }
+              attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+              attr.setDiskStoreName(diskName);
+            } else {
+              attr.setDataPolicy(DataPolicy.PARTITION);
+              attr.setDiskStoreName(null);
+            }
 
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setRedundantCopies(redundancy);
-        attr.setPartitionAttributes(paf.create());
+            PartitionAttributesFactory paf = new PartitionAttributesFactory();
+            paf.setRedundantCopies(redundancy);
+            attr.setPartitionAttributes(paf.create());
 
-        // skip parent region creation
-        // partitionedregion = cache.createRegion(regionName, attr.create());
+            // skip parent region creation
+            // partitionedregion = cache.createRegion(regionName, attr.create());
 
-        // child region
-        attr.setValueConstraint(constraint);
-        paf.setColocatedWith(regionName);
-        attr.setPartitionAttributes(paf.create());
-        childRegion = cache.createRegion(childRegionName, attr.create());
-      }
-    };
+            // child region
+            attr.setValueConstraint(constraint);
+            paf.setColocatedWith(regionName);
+            attr.setPartitionAttributes(paf.create());
+            Region childRegion = cache.createRegion(childRegionName, attr.create());
+          }
+        };
 
     return (CacheSerializableRunnable) createPrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRCreateLimitedBuckets(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRCreateLimitedBuckets(
       final String regionName, final int redundancy, final int buckets) {
 
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    SerializableRunnable createPrRegion = new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
 
@@ -429,7 +393,7 @@ public class PRQueryDUnitHelper implements Serializable {
           attr.setPartitionAttributes(prAttr);
           partitionedregion = cache.createRegion(regionName, attr.create());
         } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
+          LogWriterUtils.getLogWriter().warning(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
               ex);
         }
@@ -449,11 +413,10 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable) createPrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPersistentPRCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForPersistentPRCreate(
       final String regionName, final int redundancy, final Class constraint) {
 
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    SerializableRunnable createPrRegion = new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
 
@@ -474,7 +437,7 @@ public class PRQueryDUnitHelper implements Serializable {
 
           partitionedregion = cache.createRegion(regionName, attr.create());
         } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
+          LogWriterUtils.getLogWriter().warning(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
               ex);
         }
@@ -500,11 +463,10 @@ public class PRQueryDUnitHelper implements Serializable {
    * @return cacheSerializable object
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRColocatedCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRColocatedCreate(
       final String regionName, final int redundancy, final String coloRegionName) {
 
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    SerializableRunnable createPrRegion = new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
 
@@ -522,7 +484,7 @@ public class PRQueryDUnitHelper implements Serializable {
 
           partitionedregion = cache.createRegion(regionName, attr.create());
         } catch (IllegalStateException ex) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
+          LogWriterUtils.getLogWriter().warning(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
               ex);
         }
@@ -610,11 +572,11 @@ public class PRQueryDUnitHelper implements Serializable {
               }
             } catch (EntryExistsException e) {
               // Do nothing let it go
-              org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
+              LogWriterUtils.getLogWriter()
                   .info("EntryExistsException was thrown for key " + j);
             } catch (EntryNotFoundException e) {
               // Do nothing let it go
-              org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
+              LogWriterUtils.getLogWriter()
                   .info("EntryNotFoundException was thrown for key " + j);
             }
           }
@@ -629,7 +591,7 @@ public class PRQueryDUnitHelper implements Serializable {
    *
    * @return cacheSerializable object
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRDuplicatePuts(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRDuplicatePuts(
       final String regionName, final Object[] portfolio, final int from, final int to) {
     SerializableRunnable prPuts = new CacheSerializableRunnable("PRPuts") {
       @Override
@@ -648,7 +610,7 @@ public class PRQueryDUnitHelper implements Serializable {
    *
    * @return cacheSerializable object
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRPutsKeyValue(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRPutsKeyValue(
       final String regionName, final Object[] portfolio, final int from, final int to) {
     SerializableRunnable prPuts = new CacheSerializableRunnable("PRPuts") {
       @Override
@@ -669,12 +631,12 @@ public class PRQueryDUnitHelper implements Serializable {
    * 3. Compares the appropriate resultSet <br>
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRQueryAndCompareResults(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRQueryAndCompareResults(
       final String regionName, final String localRegion) {
     return getCacheSerializableRunnableForPRQueryAndCompareResults(regionName, localRegion, false);
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRQueryAndCompareResults(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRQueryAndCompareResults(
       final String regionName, final String localRegion,
       final boolean fullQueryOnPortfolioPositions) {
 
@@ -731,10 +693,10 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         QueryService qs = getCache().getQueryService();
-        Object[] params;
         try {
           for (int j = 0; j < queries.length; j++) {
             synchronized (region) {
+              Object[] params;
               if (fullQueryOnPortfolioPositions) {
                 params = new Object[] {local, new Double((j % 25) * 1.0 + 1)};
                 r[j][0] = qs.newQuery(queries[j]).execute(params);
@@ -763,7 +725,7 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -773,12 +735,12 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -796,7 +758,7 @@ public class PRQueryDUnitHelper implements Serializable {
   }
 
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
+  CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
       final String regionName, final String localRegion) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -856,17 +818,16 @@ public class PRQueryDUnitHelper implements Serializable {
               .info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
         }
 
-        String distinct = "SELECT DISTINCT ";
         QueryService qs = getCache().getQueryService();
         Object[] params;
 
 
         try {
+          String distinct = "SELECT DISTINCT ";
           for (int j = 0; j < queries.length; j++) {
-            String qStr = null;
             synchronized (region) {
               // Execute on local region.
-              qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
+              String qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
               r[j][0] = qs.newQuery(qStr).execute();
 
               // Execute on remote region.
@@ -875,7 +836,7 @@ public class PRQueryDUnitHelper implements Serializable {
             }
           }
 
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
@@ -890,7 +851,7 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -900,12 +861,12 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -922,7 +883,7 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable) PrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(
+  CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(
       final String regionName, final String localRegion) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -931,36 +892,6 @@ public class PRQueryDUnitHelper implements Serializable {
         Cache cache = getCache();
         // Querying the localRegion and the PR region
 
-        String[] queries = new String[] {"p.status from /REGION_NAME p order by p.status",
-            "status, ID from /REGION_NAME order by status, ID",
-            "p.status, p.ID from /REGION_NAME p order by p.status, p.ID",
-            "key.ID from /REGION_NAME.keys key order by key.ID",
-            "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID",
-            "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID",
-            "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID desc",
-            "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID",
-            "p.ID, p.status from /REGION_NAME p order by p.ID desc, p.status asc",
-            "p.ID from /REGION_NAME p, p.positions.values order by p.ID",
-            "p.ID, p.status from /REGION_NAME p, p.positions.values order by p.status, p.ID",
-            "pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId",
-            "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId, p.ID",
-            "p.iD from /REGION_NAME p order by p.iD",
-            "p.iD, p.status from /REGION_NAME p order by p.iD",
-            "iD, status from /REGION_NAME order by iD",
-            "p.getID() from /REGION_NAME p order by p.getID()",
-            "p.names[1] from /REGION_NAME p order by p.names[1]",
-            "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId desc, p.ID",
-            "p.ID, p.position1.secId from /REGION_NAME p order by p.position1.secId, p.ID",
-            "e.key.ID from /REGION_NAME.entries e order by e.key.ID",
-            "e.key.ID, e.value.status from /REGION_NAME.entries e order by e.key.ID",
-            "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID desc , e.value.status desc",
-            "e.key, e.value from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc",
-            "e.key from /REGION_NAME.entrySet e order by e.key.ID desc, e.key.pkid desc",
-            "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId",
-            "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId desc",
-            "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId",};
-
-        Object r[][] = new Object[1][2];
         Region region = cache.getRegion(regionName);
         assertNotNull(region);
 
@@ -974,17 +905,45 @@ public class PRQueryDUnitHelper implements Serializable {
               .info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
         }
 
-        String distinct = "SELECT DISTINCT ";
         QueryService qs = getCache().getQueryService();
         Object[] params;
         StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
 
         try {
+          String distinct = "SELECT DISTINCT ";
+          Object[][] r = new Object[1][2];
+          String[] queries = new String[] {"p.status from /REGION_NAME p order by p.status",
+              "status, ID from /REGION_NAME order by status, ID",
+              "p.status, p.ID from /REGION_NAME p order by p.status, p.ID",
+              "key.ID from /REGION_NAME.keys key order by key.ID",
+              "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID",
+              "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID",
+              "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID desc",
+              "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID",
+              "p.ID, p.status from /REGION_NAME p order by p.ID desc, p.status asc",
+              "p.ID from /REGION_NAME p, p.positions.values order by p.ID",
+              "p.ID, p.status from /REGION_NAME p, p.positions.values order by p.status, p.ID",
+              "pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId",
+              "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId, p.ID",
+              "p.iD from /REGION_NAME p order by p.iD",
+              "p.iD, p.status from /REGION_NAME p order by p.iD",
+              "iD, status from /REGION_NAME order by iD",
+              "p.getID() from /REGION_NAME p order by p.getID()",
+              "p.names[1] from /REGION_NAME p order by p.names[1]",
+              "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId desc, p.ID",
+              "p.ID, p.position1.secId from /REGION_NAME p order by p.position1.secId, p.ID",
+              "e.key.ID from /REGION_NAME.entries e order by e.key.ID",
+              "e.key.ID, e.value.status from /REGION_NAME.entries e order by e.key.ID",
+              "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID desc , e.value.status desc",
+              "e.key, e.value from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc",
+              "e.key from /REGION_NAME.entrySet e order by e.key.ID desc, e.key.pkid desc",
+              "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId",
+              "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId desc",
+              "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId",};
           for (final String query : queries) {
-            String qStr = null;
             synchronized (region) {
               // Execute on local region.
-              qStr = (distinct + query.replace("REGION_NAME", localRegion));
+              String qStr = (distinct + query.replace("REGION_NAME", localRegion));
               r[0][0] = qs.newQuery(qStr).execute();
 
               // Execute on remote region.
@@ -994,7 +953,7 @@ public class PRQueryDUnitHelper implements Serializable {
             }
           }
 
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
         } catch (QueryInvocationTargetException e) {
           // throw an unchecked exception so the controller can examine the cause and see whether or
@@ -1005,7 +964,7 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -1015,12 +974,12 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -1037,7 +996,7 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable) PrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryWithLimit(
+  CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryWithLimit(
       final String regionName, final String localRegion) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -1087,18 +1046,17 @@ public class PRQueryDUnitHelper implements Serializable {
               .info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
         }
 
-        String distinct = "<TRACE>SELECT DISTINCT ";
         QueryService qs = getCache().getQueryService();
         Object[] params;
 
         try {
+          String distinct = "<TRACE>SELECT DISTINCT ";
           for (int l = 1; l <= 3; l++) {
             String[] rq = new String[queries.length];
             for (int j = 0; j < queries.length; j++) {
-              String qStr = null;
               synchronized (region) {
                 // Execute on local region.
-                qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
+                String qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
                 qStr += (" LIMIT " + (l * l));
                 rq[j] = qStr;
                 SelectResults sr = (SelectResults) qs.newQuery(qStr).execute();
@@ -1128,7 +1086,7 @@ public class PRQueryDUnitHelper implements Serializable {
             ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, true, rq);
 
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
         } catch (QueryInvocationTargetException e) {
           // throw an unchecked exception so the controller can examine the cause and see whether or
@@ -1139,7 +1097,7 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -1149,12 +1107,12 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -1171,7 +1129,7 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable) PrRegion;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRCountStarQueries(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRCountStarQueries(
       final String regionName, final String localRegion) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRCountStarQuery") {
       @Override
@@ -1234,10 +1192,9 @@ public class PRQueryDUnitHelper implements Serializable {
 
         try {
           for (int j = 0; j < queries.length; j++) {
-            String qStr = null;
             synchronized (region) {
               // Execute on PR region.
-              qStr = queries[j];
+              String qStr = queries[j];
               SelectResults sr = (SelectResults) qs.newQuery(qStr).execute();
               r[j][0] = sr;
 
@@ -1248,7 +1205,7 @@ public class PRQueryDUnitHelper implements Serializable {
               r[j][1] = srr;
             }
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
@@ -1264,7 +1221,7 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -1274,12 +1231,12 @@ public class PRQueryDUnitHelper implements Serializable {
         }
 
         catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -1299,7 +1256,7 @@ public class PRQueryDUnitHelper implements Serializable {
   /**
    * Ensure queries on a PR use an index; fail if they do not.
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForIndexUsageCheck() {
+  CacheSerializableRunnable getCacheSerializableRunnableForIndexUsageCheck() {
     SerializableRunnable PrIndexCheck = new CacheSerializableRunnable("PrIndexCheck") {
       @Override
       public void run2() {
@@ -1361,7 +1318,7 @@ public class PRQueryDUnitHelper implements Serializable {
    * 3. Compares the appropriate resultSet <br>
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults(
       final String regionName, final String localRegion) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -1381,13 +1338,13 @@ public class PRQueryDUnitHelper implements Serializable {
             r[j][1] = region.query(query[j]);
           }
 
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Queries Executed successfully on Local region & PR Region");
 
           compareTwoQueryResults(r, query.length);
 
         } catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Caught an Exception while querying Constants"
                   + e,
               e);
@@ -1409,20 +1366,18 @@ public class PRQueryDUnitHelper implements Serializable {
 
   public CacheSerializableRunnable getCacheSerializableRunnableForPRAccessorCreate(
       final String regionName, final int redundancy, final Class constraint) {
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
+    SerializableRunnable createPrRegion = new CacheSerializableRunnable(regionName) {
       @Override
       public void run2() throws CacheException {
         Cache cache = getCache();
-        Region partitionedregion = null;
-        int maxMem = 0;
         AttributesFactory attr = new AttributesFactory();
         attr.setValueConstraint(constraint);
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        int maxMem = 0;
         PartitionAttributes prAttr =
             paf.setLocalMaxMemory(maxMem).setRedundantCopies(redundancy).create();
         attr.setPartitionAttributes(prAttr);
-        partitionedregion = cache.createRegion(regionName, attr.create());
+        Region partitionedregion = cache.createRegion(regionName, attr.create());
         assertNotNull(
             "PRQueryDUnitHelper#getCacheSerializableRunnableForPRAccessorCreate: Partitioned Region "
                 + regionName + " not in cache",
@@ -1446,25 +1401,21 @@ public class PRQueryDUnitHelper implements Serializable {
    * @param r the query result pairs to compare
    * @param len the number of result pairs
    */
 
-  public void compareTwoQueryResults(Object[][] r, int len) {
-
-    Set set1 = null;
-    Set set2 = null;
-    ObjectType type1, type2;
+  private void compareTwoQueryResults(Object[][] r, int len) {
 
     for (int j = 0; j < len; j++) {
       if ((r[j][0] != null) && (r[j][1] != null)) {
-        type1 = ((SelectResults) r[j][0]).getCollectionType().getElementType();
+        ObjectType type1 = ((SelectResults) r[j][0]).getCollectionType().getElementType();
         assertNotNull("PRQueryDUnitHelper#compareTwoQueryResults: Type 1 is NULL " + type1, type1);
-        type2 = ((SelectResults) r[j][1]).getCollectionType().getElementType();
+        ObjectType type2 = ((SelectResults) r[j][1]).getCollectionType().getElementType();
         assertNotNull("PRQueryDUnitHelper#compareTwoQueryResults: Type 2 is NULL " + type2, type2);
         if ((type1.getClass().getName()).equals(type2.getClass().getName())) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#compareTwoQueryResults: Both Search Results are of the same Type i.e.--> "
                   + ((SelectResults) r[j][0]).getCollectionType().getElementType());
 
         } else {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
+          LogWriterUtils.getLogWriter()
               .error("PRQueryDUnitHelper#compareTwoQueryResults: Classes are : "
                   + type1.getClass().getName() + " " + type2.getClass().getName());
 
@@ -1474,20 +1425,20 @@ public class PRQueryDUnitHelper implements Serializable {
         int size0 = ((SelectResults) r[j][0]).size();
         int size1 = ((SelectResults) r[j][1]).size();
         if (size0 == size1) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#compareTwoQueryResults: Both Search Results are non-zero and are of Same Size i.e.  Size= "
                   + size1 + ";j=" + j);
 
         } else {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
                   + size0 + ";size1=" + size1 + ";j=" + j);
           fail(
               "PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
                   + size0 + ";size1=" + size1 + ";j=" + j);
         }
-        set2 = (((SelectResults) r[j][1]).asSet());
-        set1 = (((SelectResults) r[j][0]).asSet());
+        Set set2 = (((SelectResults) r[j][1]).asSet());
+        Set set1 = (((SelectResults) r[j][0]).asSet());
 
         assertEquals("PRQueryDUnitHelper#compareTwoQueryResults: FAILED: "
             + "result contents are not equal, ", set1, set2);
@@ -1506,7 +1457,7 @@ public class PRQueryDUnitHelper implements Serializable {
    * @return cacheSerializable object
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRInvalidQuery(
+  CacheSerializableRunnable getCacheSerializableRunnableForPRInvalidQuery(
       final String regionName) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -1515,11 +1466,10 @@ public class PRQueryDUnitHelper implements Serializable {
         Cache cache = getCache();
         // Querying the PR region with an Invalid query string
 
-        String query = "INVALID QUERY";
-
         Region region = cache.getRegion(regionName);
         try {
 
+          String query = "INVALID QUERY";
           region.query(query);
           fail(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRInvalidQuery: InvalidQueryException expected");
@@ -1527,7 +1477,7 @@ public class PRQueryDUnitHelper implements Serializable {
           // pass
         } catch (QueryException qe) {
 
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRInvalidQuery: Caught another Exception while querying , Exception is "
                   + qe,
               qe);
@@ -1550,24 +1500,24 @@ public class PRQueryDUnitHelper implements Serializable {
    * @return cacheSerializable object
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForRegionClose(
+  CacheSerializableRunnable getCacheSerializableRunnableForRegionClose(
       final String regionName, final int redundancy, final Class constraint) {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("regionClose") {
       @Override
       public void run2() throws CacheException {
         Cache cache = getCache();
         final String expectedRegionDestroyedException = RegionDestroyedException.class.getName();
-        final String expectedReplyException = ReplyException.class.getName();
         getCache().getLogger().info("<ExpectedException action=add>"
             + expectedRegionDestroyedException + "</ExpectedException>");
+        final String expectedReplyException = ReplyException.class.getName();
         getCache().getLogger().info(
             "<ExpectedException action=add>" + expectedReplyException + "</ExpectedException>");
 
         Region region = cache.getRegion(regionName);
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Closing region");
         region.close();
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Closed on VM ");
         AttributesFactory attr = new AttributesFactory();
         attr.setValueConstraint(constraint);
@@ -1575,7 +1525,7 @@ public class PRQueryDUnitHelper implements Serializable {
         PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
         attr.setPartitionAttributes(prAttr);
         cache.createRegion(regionName, attr.create());
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Recreated on VM ");
         getCache().getLogger().info(
             "<ExpectedException action=remove>" + expectedReplyException + "</ExpectedException>");
@@ -1639,7 +1589,7 @@ public class PRQueryDUnitHelper implements Serializable {
   /**
    * This function defines an appropriate index on a PR given the name and other parameters.
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForDefineIndex(
+  CacheSerializableRunnable getCacheSerializableRunnableForDefineIndex(
       final String prRegionName, final ArrayList<String> indexName,
       final ArrayList<String> indexedExpression) {
     return getCacheSerializableRunnableForDefineIndex(prRegionName, indexName, indexedExpression,
@@ -1680,7 +1630,7 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable) prIndexCreator;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForRRIndexCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForRRIndexCreate(
       final String rrRegionName, final String indexName, final String indexedExpression,
       final String fromClause, final String alias) {
 
@@ -1754,13 +1704,13 @@ public class PRQueryDUnitHelper implements Serializable {
   }
 
 
-  public File findFile(String fileName) {
+  File findFile(String fileName) {
     return new File(
         createTempFileFromResource(PRQueryDUnitHelper.class, fileName)
             .getAbsolutePath());
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForIndexCreationCheck(
+  CacheSerializableRunnable getCacheSerializableRunnableForIndexCreationCheck(
       final String name) {
     return new CacheSerializableRunnable("PrIndexCreationCheck") {
       @Override
@@ -1790,7 +1740,7 @@ public class PRQueryDUnitHelper implements Serializable {
    * This function creates a duplicate index, which should throw an IndexNameConflictException; if it
    * does not, the test should fail.
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForDuplicatePRIndexCreate(
+  CacheSerializableRunnable getCacheSerializableRunnableForDuplicatePRIndexCreate(
       final String prRegionName, final String indexName, final String indexedExpression,
       final String fromClause, final String alias) {
     SerializableRunnable prIndexCreator =
@@ -1832,7 +1782,7 @@ public class PRQueryDUnitHelper implements Serializable {
    * @param name name of the partitioned regions
    */
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForRemoveIndex(final String name,
+  CacheSerializableRunnable getCacheSerializableRunnableForRemoveIndex(final String name,
       final boolean random) {
     return new CacheSerializableRunnable("PrRemoveIndex") {
       @Override
@@ -1878,7 +1828,7 @@ public class PRQueryDUnitHelper implements Serializable {
     };
   }
 
-  public SerializableRunnableIF getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(
+  SerializableRunnableIF getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(
       final String name, final String coloName, final String localName,
       final String coloLocalName) {
 
@@ -1903,8 +1853,7 @@ public class PRQueryDUnitHelper implements Serializable {
             "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)",};
 
         Object r[][] = new Object[queries.length][2];
-        Region region = null;
-        region = cache.getRegion(name);
+        Region region = cache.getRegion(name);
         assertNotNull(region);
         region = cache.getRegion(coloName);
         assertNotNull(region);
@@ -1948,7 +1897,7 @@ public class PRQueryDUnitHelper implements Serializable {
                 .execute();
             r[j][1] = r2.asList();
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           // compareTwoQueryResults(r, queries.length);
@@ -1963,7 +1912,7 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -1971,12 +1920,12 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -1993,7 +1942,7 @@ public class PRQueryDUnitHelper implements Serializable {
 
   }
 
-  public SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryAndCompareResults(
+  SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryAndCompareResults(
       final String name, final String coloName, final String localName,
       final String coloLocalName) {
 
@@ -2018,8 +1967,7 @@ public class PRQueryDUnitHelper implements Serializable {
             "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)",};
 
         Object r[][] = new Object[queries.length][2];
-        Region region = null;
-        region = cache.getRegion(name);
+        Region region = cache.getRegion(name);
         assertNotNull(region);
         region = cache.getRegion(coloName);
         assertNotNull(region);
@@ -2062,7 +2010,7 @@ public class PRQueryDUnitHelper implements Serializable {
                 .execute();
             r[j][1] = r2.asList();
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           // compareTwoQueryResults(r, queries.length);
@@ -2077,7 +2025,7 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -2085,12 +2033,12 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -2108,7 +2056,7 @@ public class PRQueryDUnitHelper implements Serializable {
   }
 
 
-  public SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryWithCompactAndRangeIndexAndCompareResults(
+  SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryWithCompactAndRangeIndexAndCompareResults(
       final String name, final String coloName, final String localName,
       final String coloLocalName) {
 
@@ -2132,8 +2080,7 @@ public class PRQueryDUnitHelper implements Serializable {
             "r1.ID = pos2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)",};
 
         Object r[][] = new Object[queries.length][2];
-        Region region = null;
-        region = cache.getRegion(name);
+        Region region = cache.getRegion(name);
         assertNotNull(region);
         region = cache.getRegion(coloName);
         assertNotNull(region);
@@ -2175,7 +2122,7 @@ public class PRQueryDUnitHelper implements Serializable {
                 .execute();
             r[j][1] = r2.asList();
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
@@ -2189,7 +2136,7 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -2197,12 +2144,12 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -2248,8 +2195,7 @@ public class PRQueryDUnitHelper implements Serializable {
             "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)",};
 
         Object r[][] = new Object[queries.length][2];
-        Region region = null;
-        region = cache.getRegion(name);
+        Region region = cache.getRegion(name);
         assertNotNull(region);
         region = cache.getRegion(coloName);
         assertNotNull(region);
@@ -2291,7 +2237,7 @@ public class PRQueryDUnitHelper implements Serializable {
                 .execute();
             r[j][1] = r2.asList();
           }
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
@@ -2305,7 +2251,7 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error(
+          LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                   + e,
               e);
@@ -2313,12 +2259,12 @@ public class PRQueryDUnitHelper implements Serializable {
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         } catch (CancelException cce) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
 
@@ -2332,10 +2278,8 @@ public class PRQueryDUnitHelper implements Serializable {
       }
     };
     return PrRegion;
-
   }
 
-  // Helper classes and function
   public static class TestQueryFunction implements Function {
 
     @Override
@@ -2350,8 +2294,7 @@ public class PRQueryDUnitHelper implements Serializable {
 
     private final String id;
 
-    public TestQueryFunction(String id) {
-      super();
+    TestQueryFunction(String id) {
       this.id = id;
     }
 
@@ -2363,21 +2306,20 @@ public class PRQueryDUnitHelper implements Serializable {
       try {
         Query query = queryService.newQuery(qstr);
         context.getResultSender().sendResult(
-            (ArrayList) ((SelectResults) query.execute((RegionFunctionContext) context)).asList());
+            ((SelectResults) query.execute((RegionFunctionContext) context)).asList());
         context.getResultSender().lastResult(null);
       } catch (Exception e) {
-        e.printStackTrace();
         throw new FunctionException(e);
       }
     }
 
     @Override
     public String getId() {
-      return this.id;
+      return id;
     }
   }
 
-  public SerializableRunnable getCacheSerializableRunnableForCloseCache() {
+  SerializableRunnable getCacheSerializableRunnableForCloseCache() {
     return new SerializableRunnable() {
       @Override
       public void run() {
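
The findFile helper above shows the ResourceUtils pattern this commit standardizes on: copy a classpath resource into a temporary file and hand the test an absolute File. A minimal sketch of that usage, assuming a hypothetical fixture named PRQueryDUnitHelper.xml on the test classpath (the resource name is illustrative, not part of this change):

    import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;

    import java.io.File;

    class ResourceFixtureSketch {
      // Copies the named classpath resource into a temporary file and returns it
      // as an absolute File, so cache-xml and similar APIs can read it directly.
      File findXmlFixture() {
        return new File(
            createTempFileFromResource(PRQueryDUnitHelper.class, "PRQueryDUnitHelper.xml")
                .getAbsolutePath());
      }
    }
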
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
index a85b65d..4954dd7 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
@@ -15,6 +15,7 @@
 package org.apache.geode.cache30;
 
 import static org.apache.geode.distributed.ConfigurationProperties.ROLES;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -278,7 +279,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     attrs.setPoolName("mypool");
     attrs.setDataPolicy(DataPolicy.EMPTY); // required for multiuser mode
     cache.createVMRegion("rootNORMAL", attrs);
-    IgnoredException.addIgnoredException("Connection refused: connect");
+    addIgnoredException("Connection refused: connect");
     testXml(cache);
     Cache c = getCache();
     assertNotNull(c);
@@ -1471,8 +1472,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     CacheTransactionManagerCreation txMgrCreation = new CacheTransactionManagerCreation();
     txMgrCreation.setWriter(new TestTransactionWriter());
     cc.addCacheTransactionManagerCreation(txMgrCreation);
-    IgnoredException expectedException = IgnoredException
-        .addIgnoredException("A TransactionWriter cannot be registered on a client");
+    IgnoredException expectedException =
+        addIgnoredException("A TransactionWriter cannot be registered on a client");
     try {
       testXml(cc);
       fail("expected exception not thrown");
@@ -1668,7 +1669,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     rmc.setEvictionHeapPercentage(high);
     rmc.setCriticalHeapPercentage(low);
     cache.setResourceManagerCreation(rmc);
-    IgnoredException expectedException = IgnoredException.addIgnoredException(
+    IgnoredException expectedException = addIgnoredException(
         "Eviction percentage must be less than the critical percentage.");
     try {
       testXml(cache);
@@ -1719,18 +1720,18 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   // A bunch of classes for use in testing the serialization schtuff
   public static class DS1 implements DataSerializable {
     @Override
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {}
+    public void fromData(DataInput in) {}
 
     @Override
-    public void toData(DataOutput out) throws IOException {}
+    public void toData(DataOutput out) {}
   };
 
   public static class DS2 implements DataSerializable {
     @Override
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {}
+    public void fromData(DataInput in) {}
 
     @Override
-    public void toData(DataOutput out) throws IOException {}
+    public void toData(DataOutput out) {}
   };
 
   public static class NotDataSerializable implements Serializable {
@@ -1740,7 +1741,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     public GoodSerializer() {}
 
     @Override
-    public Object fromData(DataInput in) throws IOException, ClassNotFoundException {
+    public Object fromData(DataInput in) {
       return null;
     }
 
@@ -1755,14 +1756,14 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     }
 
     @Override
-    public boolean toData(Object o, DataOutput out) throws IOException {
+    public boolean toData(Object o, DataOutput out) {
       return false;
     }
   }
 
   public static class BadSerializer extends DataSerializer {
     @Override
-    public Object fromData(DataInput in) throws IOException, ClassNotFoundException {
+    public Object fromData(DataInput in) {
       return null;
     }
 
@@ -1777,7 +1778,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     }
 
     @Override
-    public boolean toData(Object o, DataOutput out) throws IOException {
+    public boolean toData(Object o, DataOutput out) {
       return false;
     }
   }
@@ -1806,7 +1807,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     cc.setSerializerCreation(sc);
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       testXml(cc);
       fail("Instantiator should not have registered due to bad class.");
@@ -1821,7 +1822,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     cc.setSerializerCreation(sc);
 
     IgnoredException expectedException1 =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       testXml(cc);
       fail("Serializer should not have registered due to bad class.");
@@ -2384,7 +2385,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
     attrs.setPoolName("mypool");
     cache.createVMRegion("rootNORMAL", attrs);
-    IgnoredException expectedException = IgnoredException.addIgnoredException(
+    IgnoredException expectedException = addIgnoredException(
         String.format("The connection pool %s has not been created",
             "mypool"));
     try {
@@ -2405,7 +2406,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
       // now make sure declarative cache can't create the same pool
       CacheCreation cache = new CacheCreation();
       cache.createPoolFactory().addLocator(ALIAS2, 12345).create("mypool");
-      IgnoredException expectedException = IgnoredException.addIgnoredException(
+      IgnoredException expectedException = addIgnoredException(
           String.format("A pool named %s already exists", "mypool"));
       try {
         testXml(cache);
@@ -2421,10 +2422,10 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
 
   @Test
   public void testDynamicRegionFactoryConnectionPool() throws Exception, IOException {
-    IgnoredException.addIgnoredException("Connection reset");
-    IgnoredException.addIgnoredException("SocketTimeoutException");
-    IgnoredException.addIgnoredException("ServerConnectivityException");
-    IgnoredException.addIgnoredException("Socket Closed");
+    addIgnoredException("Connection reset");
+    addIgnoredException("SocketTimeoutException");
+    addIgnoredException("ServerConnectivityException");
+    addIgnoredException("Socket Closed");
     getSystem();
     VM vm0 = Host.getHost(0).getVM(0);
     final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
@@ -2845,7 +2846,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
    * leaving the functionality for future comparisons (by hand of course).
    */
   @Test
-  public void testPartitionedRegionInstantiation() throws Exception {
+  public void testPartitionedRegionInstantiation() {
     CacheCreation cache = new CacheCreation();
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
 
@@ -3051,8 +3052,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   @Override
   public Properties getDistributedSystemProperties() {
     Properties props = super.getDistributedSystemProperties();
-    if (this.xmlProps != null) {
-      for (Iterator iter = this.xmlProps.entrySet().iterator(); iter.hasNext();) {
+    if (xmlProps != null) {
+      for (Iterator iter = xmlProps.entrySet().iterator(); iter.hasNext();) {
         Map.Entry entry = (Map.Entry) iter.next();
         String key = (String) entry.getKey();
         String value = (String) entry.getValue();
@@ -3103,13 +3104,13 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
       // will not think that a required role is missing
       Properties config = new Properties();
       config.setProperty(ROLES, MY_ROLES);
-      this.xmlProps = config;
+      xmlProps = config;
     }
     DistributedRegion.ignoreReconnect = true;
     try {
       testXml(cache);
     } finally {
-      this.xmlProps = null;
+      xmlProps = null;
       try {
         preTearDown();
       } finally {
@@ -3194,7 +3195,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     factory.setEvictionAttributes(ev);
     // RegionAttributes atts = factory.create();
     createRegion(name, factory.create());
-    finishCacheXml(this.temporaryFolder.getRoot(), getUniqueName(), getUseSchema(),
+    finishCacheXml(temporaryFolder.getRoot(), getUniqueName(), getUseSchema(),
         getGemFireVersion());
     Region r = getRootRegion().getSubregion(name);
 
@@ -3346,7 +3347,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   public void testUnknownNamedAttributes() throws Exception {
     setXmlFile(findFile("unknownNamedAttributes.xml"));
 
-    IgnoredException expectedException = IgnoredException.addIgnoredException(
+    IgnoredException expectedException = addIgnoredException(
         "Cannot reference non-existing region attributes named");
     try {
       getCache();
@@ -3381,8 +3382,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
 
     setXmlFile(findFile("sameRootRegion.xml"));
 
-    IgnoredException.addIgnoredException("While reading Cache XML file");
-    IgnoredException.addIgnoredException("org.apache.geode.cache.RegionExistsException");
+    addIgnoredException("While reading Cache XML file");
+    addIgnoredException("org.apache.geode.cache.RegionExistsException");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -3407,7 +3408,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   public void testCreateSameSubregionTwice() throws Exception {
     CacheCreation cache = new CacheCreation();
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
-    String name = this.getUniqueName();
+    String name = getUniqueName();
 
     Region root = cache.createRegion("root", attrs);
 
@@ -3423,8 +3424,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
 
     setXmlFile(findFile("sameSubregion.xml"));
 
-    IgnoredException.addIgnoredException("While reading Cache XML file");
-    IgnoredException.addIgnoredException("org.apache.geode.cache.RegionExistsException");
+    addIgnoredException("While reading Cache XML file");
+    addIgnoredException("org.apache.geode.cache.RegionExistsException");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -3870,15 +3871,15 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
    * Tests creating a cache with a non-existent XML file
    */
   @Test
-  public void testNonExistentFile() throws Exception {
+  public void testNonExistentFile() {
     // System.out.println("testNonExistentFile - start: " + System.currentTimeMillis());
-    File nonExistent = new File(this.getName() + ".xml");
+    File nonExistent = new File(getName() + ".xml");
     nonExistent.delete();
     // System.out.println("testNonExistentFile - deleted: " + System.currentTimeMillis());
     setXmlFile(nonExistent);
     // System.out.println("testNonExistentFile - set: " + System.currentTimeMillis());
 
-    IgnoredException expectedException = IgnoredException.addIgnoredException(
+    IgnoredException expectedException = addIgnoredException(
         String.format("Declarative Cache XML file/resource %s does not exist.",
             nonExistent.getPath()));
     try {
@@ -3897,13 +3898,13 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
    * Tests creating a cache with an XML file that is a directory
    */
   @Test
-  public void testXmlFileIsDirectory() throws Exception {
-    File dir = new File(this.getName() + "dir");
+  public void testXmlFileIsDirectory() {
+    File dir = new File(getName() + "dir");
     dir.mkdirs();
     dir.deleteOnExit();
     setXmlFile(dir);
 
-    IgnoredException expectedException = IgnoredException.addIgnoredException(
+    IgnoredException expectedException = addIgnoredException(
         String.format("Declarative XML file %s is not a file.", dir));
     try {
       getCache();
@@ -4080,7 +4081,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("malformed.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4100,7 +4101,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("badInt.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4123,7 +4124,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("badFloat.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4143,7 +4144,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("badScope.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4163,7 +4164,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("badKeyConstraintClass.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4194,7 +4195,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("callbackWithException.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4217,7 +4218,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     setXmlFile(findFile("loaderNotLoader.xml"));
 
     IgnoredException expectedException =
-        IgnoredException.addIgnoredException("While reading Cache XML file");
+        addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -4290,8 +4291,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     CacheCreation cache = new CacheCreation();
 
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
-    File[] dirs = new File[] {new File(this.getUniqueName() + "-dir1"),
-        new File(this.getUniqueName() + "-dir2")};
+    File[] dirs = new File[] {new File(getUniqueName() + "-dir1"),
+        new File(getUniqueName() + "-dir2")};
     for (int i = 0; i < dirs.length; i++) {
       dirs[i].mkdirs();
       dirs[i].deleteOnExit();
@@ -4363,7 +4364,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   @Ignore // TODO: why is testExampleCacheXmlFile @Ignored?
   public void testExampleCacheXmlFile() throws Exception {
     // Check for old example files
-    String dirName = "examples_" + this.getGemFireVersion();
+    String dirName = "examples_" + getGemFireVersion();
     File dir = null;
     try {
       dir = findFile(dirName);
@@ -4426,11 +4427,11 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
 
     @Override
     public Properties getConfig() {
-      if (null == this.props) {
-        this.props = new Properties();
+      if (null == props) {
+        props = new Properties();
       }
-      this.props.setProperty("EvictionObjectSizerColor", "blue");
-      return this.props;
+      props.setProperty("EvictionObjectSizerColor", "blue");
+      return props;
     }
 
     @Override
@@ -4447,7 +4448,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
         return false;
       }
       EvictionObjectSizer other = (EvictionObjectSizer) obj;
-      if (!this.props.equals(other.props)) {
+      if (!props.equals(other.props)) {
         return false;
       }
       return true;
@@ -4562,7 +4563,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
      * Creates a new loader and initializes its properties
      */
     public CacheLoaderWithDeclarables() {
-      this.props = new Properties();
+      props = new Properties();
       props.put("KEY1", "VALUE1");
       props.put("KEY2", new TestDeclarable());
     }
@@ -4571,18 +4572,18 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
      * Returns whether or not this {@code Declarable} was initialized.
      */
     public boolean isInitialized() {
-      return this.initialized;
+      return initialized;
     }
 
     @Override
     public void init(Properties props) {
-      this.initialized = true;
+      initialized = true;
       assertEquals(this.props, props);
     }
 
     @Override
     public Properties getConfig() {
-      return this.props;
+      return props;
     }
 
     @Override
@@ -4595,7 +4596,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     public boolean equals(Object o) {
       if (o instanceof CacheLoaderWithDeclarables) {
         CacheLoaderWithDeclarables other = (CacheLoaderWithDeclarables) o;
-        return this.props.equals(other.props);
+        return props.equals(other.props);
 
       } else {
         return false;
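
The CacheXml66DUnitTest hunks above replace qualified IgnoredException.addIgnoredException(...) calls with the statically imported addIgnoredException. A rough usage sketch of that pattern follows; the message is taken from the hunks above, while the finally-block cleanup is an assumption, since the diff context does not show how each test removes the suppression. getCache() stands in for the method inherited from the dunit cache test base class.

    import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
    import static org.junit.Assert.fail;

    import org.apache.geode.cache.CacheXmlException;
    import org.apache.geode.test.dunit.IgnoredException;

    // Suppress the expected log message while the failing step runs, then remove
    // the suppression so unrelated failures later in the test are not hidden.
    IgnoredException expectedException = addIgnoredException("While reading Cache XML file");
    try {
      getCache();
      fail("Should have thrown a CacheXmlException");
    } catch (CacheXmlException expected) {
      // expected: the declarative XML cannot be read
    } finally {
      expectedException.remove();
    }
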
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ReconnectWithCacheXMLDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ReconnectWithCacheXMLDUnitTest.java
index c8fd582..2f886e2 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ReconnectWithCacheXMLDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ReconnectWithCacheXMLDUnitTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.util.Properties;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -35,6 +36,7 @@ import org.apache.geode.distributed.internal.membership.MembershipTestHook;
 import org.apache.geode.distributed.internal.membership.gms.MembershipManagerHelper;
 import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.categories.MembershipTest;
 
@@ -44,15 +46,15 @@ import org.apache.geode.test.junit.categories.MembershipTest;
  * GEODE-2732.
  */
 @Category({MembershipTest.class, ClientServerTest.class})
+@SuppressWarnings("serial")
 public class ReconnectWithCacheXMLDUnitTest extends JUnit4CacheTestCase {
-  private static final long serialVersionUID = 1L;
+
   private String xmlProperty = DistributionConfig.GEMFIRE_PREFIX + "autoReconnect-useCacheXMLFile";
   private String oldPropertySetting;
 
-
-  public ReconnectWithCacheXMLDUnitTest() {
-    super();
-  }
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
 
   @Override
   public final void postSetUp() {
@@ -74,10 +76,10 @@ public class ReconnectWithCacheXMLDUnitTest extends JUnit4CacheTestCase {
     String fileName =
         createTempFileFromResource(getClass(), "ReconnectWithCacheXMLDUnitTest.xml")
             .getAbsolutePath();
-    result.put(ConfigurationProperties.CACHE_XML_FILE, fileName);
-    result.put(ConfigurationProperties.ENABLE_NETWORK_PARTITION_DETECTION, "true");
-    result.put(ConfigurationProperties.DISABLE_AUTO_RECONNECT, "false");
-    result.put(ConfigurationProperties.MAX_WAIT_TIME_RECONNECT, "2000");
+    result.setProperty(ConfigurationProperties.CACHE_XML_FILE, fileName);
+    result.setProperty(ConfigurationProperties.ENABLE_NETWORK_PARTITION_DETECTION, "true");
+    result.setProperty(ConfigurationProperties.DISABLE_AUTO_RECONNECT, "false");
+    result.setProperty(ConfigurationProperties.MAX_WAIT_TIME_RECONNECT, "2000");
     return result;
   }
 
@@ -87,15 +89,12 @@ public class ReconnectWithCacheXMLDUnitTest extends JUnit4CacheTestCase {
         .withDisableDefaultServer(true);
     Cache cache = getCache();
 
-    final AtomicBoolean membershipFailed = new AtomicBoolean();
+    AtomicBoolean membershipFailed = new AtomicBoolean();
     MembershipManagerHelper.addTestHook(cache.getDistributedSystem(), new MembershipTestHook() {
       @Override
       public void beforeMembershipFailure(String reason, Throwable cause) {
         membershipFailed.set(true);
       }
-
-      @Override
-      public void afterMembershipFailure(String reason, Throwable cause) {}
     });
     MembershipManagerHelper.crashDistributedSystem(cache.getDistributedSystem());
     assertTrue(membershipFailed.get());
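
Note that the anonymous MembershipTestHook above now overrides only beforeMembershipFailure, which only compiles if the remaining callback has a default implementation. A sketch of that interface shape, as an illustration of the idea rather than the actual Geode source:

    // Hypothetical shape: default no-op bodies let a test hook override a single callback.
    public interface MembershipTestHook {
      default void beforeMembershipFailure(String reason, Throwable cause) {
        // no-op by default
      }

      default void afterMembershipFailure(String reason, Throwable cause) {
        // no-op by default
      }
    }
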
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorDUnitTest.java
index caf04de..4381ef7 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorDUnitTest.java
@@ -39,8 +39,15 @@ import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTOR
 import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE_PASSWORD;
 import static org.apache.geode.distributed.ConfigurationProperties.START_LOCATOR;
 import static org.apache.geode.distributed.ConfigurationProperties.USE_CLUSTER_CONFIGURATION;
-import static org.apache.geode.internal.logging.LogWriterLevel.ALL;
+import static org.apache.geode.internal.security.SecurableCommunicationChannel.LOCATOR;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.Disconnect.disconnectFromDS;
+import static org.apache.geode.test.dunit.DistributedTestUtils.deleteLocatorStateFile;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.VM.getController;
+import static org.apache.geode.test.dunit.VM.getHostName;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.apache.geode.test.dunit.VM.toArray;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
@@ -52,12 +59,13 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.Serializable;
+import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.logging.log4j.Logger;
 import org.junit.After;
@@ -68,7 +76,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.ForcedDisconnectException;
 import org.apache.geode.GemFireConfigException;
-import org.apache.geode.LogWriter;
 import org.apache.geode.SystemConnectException;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -89,17 +96,12 @@ import org.apache.geode.distributed.internal.membership.gms.MembershipManagerHel
 import org.apache.geode.distributed.internal.tcpserver.LocatorCancelException;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.logging.LocalLogWriter;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.geode.internal.security.SecurableCommunicationChannel;
 import org.apache.geode.internal.tcp.Connection;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.DUnitBlackboard;
 import org.apache.geode.test.dunit.DistributedTestUtils;
-import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.NetworkUtils;
 import org.apache.geode.test.dunit.RMIException;
 import org.apache.geode.test.dunit.SerializableRunnable;
@@ -107,6 +109,7 @@ import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.internal.DUnitLauncher;
 import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.dunit.rules.SharedErrorCollector;
 import org.apache.geode.test.junit.categories.MembershipTest;
 
 /**
@@ -114,20 +117,73 @@ import org.apache.geode.test.junit.categories.MembershipTest;
  *
  * @since GemFire 4.0
  */
-@Category({MembershipTest.class})
-public class LocatorDUnitTest implements java.io.Serializable {
+@Category(MembershipTest.class)
+@SuppressWarnings("serial")
+public class LocatorDUnitTest implements Serializable {
   private static final Logger logger = LogService.getLogger();
 
-  private static DUnitBlackboard blackboard;
-  private static TestHook hook;
-  static volatile InternalDistributedSystem system = null;
+  protected static volatile InternalDistributedSystem system;
+  private static volatile DUnitBlackboard blackboard;
+  private static volatile TestHook hook;
+
   protected int port1;
   private int port2;
+  private int port3;
+  private int port4;
+
+  protected String hostName;
+
+  protected VM vm0;
+  private VM vm1;
+  private VM vm2;
+  private VM vm3;
+  private VM vm4;
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(6);
 
   @Rule
-  public DistributedRule distributedRule =
-      DistributedRule.builder().withVMCount(6).build();
+  public SharedErrorCollector errorCollector = new SharedErrorCollector();
+
+  @Before
+  public void setUp() {
+    addIgnoredException("Removing shunned member");
+
+    vm0 = getVM(0);
+    vm1 = getVM(1);
+    vm2 = getVM(2);
+    vm3 = getVM(3);
+    vm4 = getVM(4);
+
+    hostName = NetworkUtils.getServerHostName();
+
+    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(4);
+    port1 = ports[0];
+    port2 = ports[1];
+    port3 = ports[2];
+    port4 = ports[3];
+    deleteLocatorStateFile(port1, port2, port3, port4);
+  }
+
+  @After
+  public void tearDown() {
+    for (VM vm : toArray(getController(), vm0, vm1, vm2, vm3, vm4)) {
+      vm.invoke(() -> {
+        stopLocator();
+        disconnectFromDS();
+        system = null;
+        blackboard = null;
+        hook = null;
+      });
+    }
 
+    // delete locator state files so they don't accidentally get used by other tests
+    for (int port : new int[] {port1, port2, port3, port4}) {
+      if (port > 0) {
+        deleteLocatorStateFile(port);
+      }
+    }
+  }
 
   private static DUnitBlackboard getBlackboard() {
     if (blackboard == null) {
@@ -153,18 +209,14 @@ public class LocatorDUnitTest implements java.io.Serializable {
     MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
   }
 
-
   /**
    * return the distributed member id for the ds on this vm
    */
   private static DistributedMember getDistributedMember(Properties props) {
-    props.put("name", "vm_" + VM.getCurrentVMNum());
+    props.setProperty("name", "vm_" + VM.getCurrentVMNum());
     DistributedSystem sys = getConnectedDistributedSystem(props);
-    sys.getLogWriter().info("<ExpectedException action=add>service failure</ExpectedException>");
-    sys.getLogWriter().info(
-        "<ExpectedException action=add>org.apache.geode.ConnectException</ExpectedException>");
-    sys.getLogWriter().info(
-        "<ExpectedException action=add>org.apache.geode.ForcedDisconnectException</ExpectedException>");
+    addIgnoredException("service failure");
+    addIgnoredException(ForcedDisconnectException.class);
     return sys.getDistributedMember();
   }
 
@@ -172,7 +224,7 @@ public class LocatorDUnitTest implements java.io.Serializable {
    * find a running locator and return its distributed member id
    */
   private static DistributedMember getLocatorDistributedMember() {
-    return (Locator.getLocator()).getDistributedSystem().getDistributedMember();
+    return Locator.getLocator().getDistributedSystem().getDistributedMember();
   }
 
   /**
@@ -191,8 +243,6 @@ public class LocatorDUnitTest implements java.io.Serializable {
     }
   }
 
-  //////// Test Methods
-
   /**
    * This tests that the locator can resume control as coordinator after all locators have been shut
    * down and one is restarted. It's necessary to have a lock service start so elder failover is
@@ -201,29 +251,24 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testCollocatedLocatorWithSecurity() {
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM vm3 = VM.getVM(3);
-
-    port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-
-    final String locators = NetworkUtils.getServerHostName() + "[" + port1 + "]";
-    final Properties properties = new Properties();
-    properties.put(MCAST_PORT, "0");
-    properties.put(START_LOCATOR, locators);
-    properties.put(LOG_LEVEL, logger.getLevel().name());
-    properties.put(SECURITY_PEER_AUTH_INIT, "org.apache.geode.distributed.AuthInitializer.create");
-    properties.put(SECURITY_PEER_AUTHENTICATOR,
+    String locators = hostName + "[" + port1 + "]";
+
+    Properties properties = new Properties();
+    properties.setProperty(SECURITY_PEER_AUTH_INIT,
+        "org.apache.geode.distributed.AuthInitializer.create");
+    properties.setProperty(SECURITY_PEER_AUTHENTICATOR,
         "org.apache.geode.distributed.MyAuthenticator.create");
+    properties.setProperty(START_LOCATOR, locators);
     addDSProps(properties);
+
     system = getConnectedDistributedSystem(properties);
     assertThat(system.getDistributedMember().getVmKind())
         .describedAs("expected the VM to have NORMAL vmKind")
         .isEqualTo(ClusterDistributionManager.NORMAL_DM_TYPE);
 
     properties.remove(START_LOCATOR);
-    properties.put(LOCATORS, locators);
+    properties.setProperty(LOCATORS, locators);
+
     SerializableRunnable startSystem = new SerializableRunnable("start system") {
       @Override
       public void run() {
@@ -253,15 +298,14 @@ public class LocatorDUnitTest implements java.io.Serializable {
     system.disconnect();
 
     vm1.invoke("ensure grantor failover", () -> {
-      DistributedLockService serviceNamed =
-          DistributedLockService.getServiceNamed("test service");
+      DistributedLockService serviceNamed = DistributedLockService.getServiceNamed("test service");
       serviceNamed.lock("foo3", 0, 0);
-      await()
-          .until(serviceNamed::isLockGrantor);
+      await().until(serviceNamed::isLockGrantor);
       assertThat(serviceNamed.isLockGrantor()).isTrue();
     });
 
-    properties.put(START_LOCATOR, locators);
+    properties.setProperty(START_LOCATOR, locators);
+
     system = getConnectedDistributedSystem(properties);
     System.out.println("done connecting distributed system.  Membership view is "
         + MembershipManagerHelper.getMembershipManager(system).getView());
@@ -289,7 +333,6 @@ public class LocatorDUnitTest implements java.io.Serializable {
     vm3.invoke(startSystem);
     vm3.invoke("get the lock service and lock something(2)",
         () -> DistributedLockService.create("test service", system).lock("foo5", 0, 0));
-
   }
 
   /**
@@ -300,232 +343,172 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testStartTwoLocators() throws Exception {
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    this.port2 = port2; // for cleanup in tearDown2
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    DistributedTestUtils.deleteLocatorStateFile(port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "false");
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = getClusterProperties(locators, "false");
     addDSProps(properties);
 
-    startVerifyAndStopLocator(loc1, loc2, port1, port2, properties);
-    startVerifyAndStopLocator(loc1, loc2, port1, port2, properties);
-    startVerifyAndStopLocator(loc1, loc2, port1, port2, properties);
+    startVerifyAndStopLocator(vm1, vm2, port1, port2, properties);
+    startVerifyAndStopLocator(vm1, vm2, port1, port2, properties);
+    startVerifyAndStopLocator(vm1, vm2, port1, port2, properties);
   }
 
   @Test
   public void testStartTwoLocatorsWithSingleKeystoreSSL() throws Exception {
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    this.port2 = port2; // for cleanup in tearDown2
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    DistributedTestUtils.deleteLocatorStateFile(port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "false");
-    properties.put(SSL_CIPHERS, "any");
-    properties.put(SSL_PROTOCOLS, "TLSv1,TLSv1.1,TLSv1.2");
-    properties.put(SSL_KEYSTORE, getSingleKeyKeystore());
-    properties.put(SSL_KEYSTORE_PASSWORD, "password");
-    properties.put(SSL_KEYSTORE_TYPE, "JKS");
-    properties.put(SSL_TRUSTSTORE, getSingleKeyKeystore());
-    properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-    properties.put(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannel.LOCATOR.getConstant());
-
-    startVerifyAndStopLocator(loc1, loc2, port1, port2, properties);
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = getClusterProperties(locators, "false");
+    properties.setProperty(SSL_CIPHERS, "any");
+    properties.setProperty(SSL_ENABLED_COMPONENTS, LOCATOR.getConstant());
+    properties.setProperty(SSL_KEYSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    properties.setProperty(SSL_PROTOCOLS, "TLSv1,TLSv1.1,TLSv1.2");
+    properties.setProperty(SSL_TRUSTSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+
+    startVerifyAndStopLocator(vm1, vm2, port1, port2, properties);
   }
 
   @Test
   public void testStartTwoLocatorsWithMultiKeystoreSSL() throws Exception {
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    this.port2 = port2; // for cleanup in tearDown2
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    DistributedTestUtils.deleteLocatorStateFile(port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "false");
-    properties.put(SSL_CIPHERS, "any");
-    properties.put(SSL_PROTOCOLS, "any");
-    properties.put(SSL_KEYSTORE, getMultiKeyKeystore());
-    properties.put(SSL_KEYSTORE_PASSWORD, "password");
-    properties.put(SSL_KEYSTORE_TYPE, "JKS");
-    properties.put(SSL_TRUSTSTORE, getMultiKeyTruststore());
-    properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-    properties.put(SSL_LOCATOR_ALIAS, "locatorkey");
-    properties.put(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannel.LOCATOR.getConstant());
-
-    startVerifyAndStopLocator(loc1, loc2, port1, port2, properties);
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = getClusterProperties(locators, "false");
+    properties.setProperty(SSL_CIPHERS, "any");
+    properties.setProperty(SSL_ENABLED_COMPONENTS, LOCATOR.getConstant());
+    properties.setProperty(SSL_KEYSTORE, getMultiKeyKeystore());
+    properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    properties.setProperty(SSL_TRUSTSTORE, getMultiKeyTruststore());
+    properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_LOCATOR_ALIAS, "locatorkey");
+    properties.setProperty(SSL_PROTOCOLS, "any");
+
+    startVerifyAndStopLocator(vm1, vm2, port1, port2, properties);
   }
 
   @Test
   public void testNonSSLLocatorDiesWhenConnectingToSSLLocator() {
-    IgnoredException.addIgnoredException("Unrecognized SSL message, plaintext connection");
-    IgnoredException.addIgnoredException("LocatorCancelException");
-
-    final String hostname = NetworkUtils.getServerHostName();
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
-    port1 = -1;
-    port2 = -1;
-    final Properties properties = new Properties();
-    properties.put(MCAST_PORT, "0");
-    properties.put(ENABLE_NETWORK_PARTITION_DETECTION, "false");
-    properties.put(DISABLE_AUTO_RECONNECT, "true");
-    properties.put(MEMBER_TIMEOUT, "2000");
-    properties.put(LOG_LEVEL, logger.getLevel().name());
-    properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
-    properties.put(USE_CLUSTER_CONFIGURATION, "false");
-    properties.put(SSL_CIPHERS, "any");
-    properties.put(SSL_PROTOCOLS, "any");
-    properties.put(SSL_KEYSTORE, getSingleKeyKeystore());
-    properties.put(SSL_KEYSTORE_PASSWORD, "password");
-    properties.put(SSL_KEYSTORE_TYPE, "JKS");
-    properties.put(SSL_TRUSTSTORE, getSingleKeyKeystore());
-    properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-    properties.put(SSL_REQUIRE_AUTHENTICATION, "true");
-    properties.put(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannel.LOCATOR.getConstant());
+    addIgnoredException("Unrecognized SSL message, plaintext connection");
+    addIgnoredException(LocatorCancelException.class);
+
+    Properties properties = new Properties();
+    properties.setProperty(DISABLE_AUTO_RECONNECT, "true");
+    properties.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "false");
+    properties.setProperty(MEMBER_TIMEOUT, "2000");
+    properties.setProperty(USE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(SSL_CIPHERS, "any");
+    properties.setProperty(SSL_ENABLED_COMPONENTS, LOCATOR.getConstant());
+    properties.setProperty(SSL_KEYSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    properties.setProperty(SSL_PROTOCOLS, "any");
+    properties.setProperty(SSL_REQUIRE_AUTHENTICATION, "true");
+    properties.setProperty(SSL_TRUSTSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
 
     // we set port1 so that the state file gets cleaned up later.
-    port1 = startLocatorGetPort(loc1, properties, 0);
+    port1 = startLocatorGetPort(vm1, properties, 0);
 
-    loc1.invoke("expect only one member in system",
+    vm1.invoke("expect only one member in system",
         () -> expectSystemToContainThisManyMembers(1));
 
+    properties.setProperty(LOCATORS, hostName + "[" + port1 + "]");
     properties.remove(SSL_ENABLED_COMPONENTS);
-    properties.put(LOCATORS, hostname + "[" + port1 + "]");
 
     // we set port2 so that the state file gets cleaned up later.
-    loc2.invoke(() -> {
+    vm2.invoke(() -> {
       assertThatThrownBy(() -> startLocatorBase(properties, 0))
           .isInstanceOfAny(LocatorCancelException.class, SystemConnectException.class);
 
       assertThat(Locator.getLocator()).isNull();
     });
 
-    loc1.invoke("expect only one member in system",
+    vm1.invoke("expect only one member in system",
         () -> expectSystemToContainThisManyMembers(1));
 
-    loc1.invoke("stop locator", LocatorDUnitTest::stopLocator);
+    vm1.invoke("stop locator", LocatorDUnitTest::stopLocator);
   }
 
   @Test
   public void testSSLEnabledLocatorDiesWhenConnectingToNonSSLLocator() {
-    IgnoredException.addIgnoredException("Remote host closed connection during handshake");
-    IgnoredException.addIgnoredException("Unrecognized SSL message, plaintext connection");
-    IgnoredException.addIgnoredException("LocatorCancelException");
-
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
+    addIgnoredException("Remote host closed connection during handshake");
+    addIgnoredException("Unrecognized SSL message, plaintext connection");
+    addIgnoredException("LocatorCancelException");
 
-    final String hostname = NetworkUtils.getServerHostName();
-    final Properties properties = getClusterProperties("", "false");
+    Properties properties = getClusterProperties("", "false");
     properties.remove(LOCATORS);
-    properties.put(SSL_CIPHERS, "any");
-    properties.put(SSL_PROTOCOLS, "any");
+    properties.setProperty(SSL_CIPHERS, "any");
+    properties.setProperty(SSL_PROTOCOLS, "any");
 
     // we set port1 so that the state file gets cleaned up later.
-    port1 = startLocatorGetPort(loc1, properties, 0);
-    loc1.invoke("expectSystemToContainThisManyMembers",
+    port1 = startLocatorGetPort(vm1, properties, 0);
+    vm1.invoke("expectSystemToContainThisManyMembers",
         () -> expectSystemToContainThisManyMembers(1));
 
-    properties.put(SSL_KEYSTORE, getSingleKeyKeystore());
-    properties.put(SSL_KEYSTORE_PASSWORD, "password");
-    properties.put(SSL_KEYSTORE_TYPE, "JKS");
-    properties.put(SSL_TRUSTSTORE, getSingleKeyKeystore());
-    properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-    properties.put(SSL_REQUIRE_AUTHENTICATION, "true");
-    properties.put(USE_CLUSTER_CONFIGURATION, "false");
-    properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
-    properties.put(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannel.LOCATOR.getConstant());
+    properties.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(SSL_KEYSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    properties.setProperty(SSL_REQUIRE_AUTHENTICATION, "true");
+    properties.setProperty(SSL_TRUSTSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_ENABLED_COMPONENTS, LOCATOR.getConstant());
+    properties.setProperty(USE_CLUSTER_CONFIGURATION, "false");
 
-    final String locators = hostname + "[" + port1 + "]";
-    properties.put(LOCATORS, locators);
+    String locators = hostName + "[" + port1 + "]";
 
+    properties.setProperty(LOCATORS, locators);
 
     // we set port2 so that the state file gets cleaned up later.
-    assertThatThrownBy(() -> startLocatorGetPort(loc2, properties, 0))
+    assertThatThrownBy(() -> startLocatorGetPort(vm2, properties, 0))
         .isInstanceOfAny(LocatorCancelException.class, RMIException.class);
     assertThat(Locator.getLocator()).isNull();
 
-    loc1.invoke("expectSystemToContainThisManyMembers",
+    vm1.invoke("expectSystemToContainThisManyMembers",
         () -> expectSystemToContainThisManyMembers(1));
 
-    loc1.invoke("stop locator", LocatorDUnitTest::stopLocator);
+    vm1.invoke("stop locator", LocatorDUnitTest::stopLocator);
   }
 
   @Test
   public void testStartTwoLocatorsWithDifferentSSLCertificates() {
-    IgnoredException.addIgnoredException("Remote host closed connection during handshake");
-    IgnoredException
-        .addIgnoredException("unable to find valid certification path to requested target");
-    IgnoredException.addIgnoredException("Received fatal alert: certificate_unknown");
-    IgnoredException.addIgnoredException("LocatorCancelException");
-    IgnoredException.addIgnoredException("Unrecognized SSL message, plaintext connection");
-
-    VM loc1 = VM.getVM(1);
-    VM loc2 = VM.getVM(2);
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    this.port2 = port2; // for cleanup in tearDown2
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    DistributedTestUtils.deleteLocatorStateFile(port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "false");
-    properties.put(SSL_CIPHERS, "any");
-    properties.put(SSL_PROTOCOLS, "any");
-    properties.put(SSL_KEYSTORE, getSingleKeyKeystore());
-    properties.put(SSL_KEYSTORE_PASSWORD, "password");
-    properties.put(SSL_KEYSTORE_TYPE, "JKS");
-    properties.put(SSL_TRUSTSTORE, getSingleKeyKeystore());
-    properties.put(SSL_TRUSTSTORE_PASSWORD, "password");
-    properties.put(SSL_REQUIRE_AUTHENTICATION, "true");
-    properties.put(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannel.LOCATOR.getConstant());
-
-    try {
-      startLocator(loc1, properties, port1);
-      loc1.invoke("expectSystemToContainThisManyMembers",
-          () -> expectSystemToContainThisManyMembers(1));
-
-      properties.put(SSL_KEYSTORE, getMultiKeyKeystore());
-      properties.put(SSL_TRUSTSTORE, getMultiKeyTruststore());
-      properties.put(SSL_LOCATOR_ALIAS, "locatorkey");
-
-      assertThatThrownBy(() -> startLocator(loc2, properties, port2))
-          .isInstanceOfAny(LocatorCancelException.class, RMIException.class);
-      assertThat(Locator.getLocator()).isNull();
+    addIgnoredException("Remote host closed connection during handshake");
+    addIgnoredException("unable to find valid certification path to requested target");
+    addIgnoredException("Received fatal alert: certificate_unknown");
+    addIgnoredException("LocatorCancelException");
+    addIgnoredException("Unrecognized SSL message, plaintext connection");
+
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = getClusterProperties(locators, "false");
+    properties.setProperty(SSL_CIPHERS, "any");
+    properties.setProperty(SSL_ENABLED_COMPONENTS, LOCATOR.getConstant());
+    properties.setProperty(SSL_KEYSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    properties.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    properties.setProperty(SSL_PROTOCOLS, "any");
+    properties.setProperty(SSL_REQUIRE_AUTHENTICATION, "true");
+    properties.setProperty(SSL_TRUSTSTORE, getSingleKeyKeystore());
+    properties.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+
+    startLocator(vm1, properties, port1);
+
+    vm1.invoke("expectSystemToContainThisManyMembers",
+        () -> expectSystemToContainThisManyMembers(1));
 
+    properties.setProperty(SSL_KEYSTORE, getMultiKeyKeystore());
+    properties.setProperty(SSL_LOCATOR_ALIAS, "locatorkey");
+    properties.setProperty(SSL_TRUSTSTORE, getMultiKeyTruststore());
 
+    assertThatThrownBy(() -> startLocator(vm2, properties, port2))
+        .isInstanceOfAny(LocatorCancelException.class, RMIException.class);
+    assertThat(Locator.getLocator()).isNull();
 
-    } finally {
-      try {
-        loc1.invoke("expectSystemToContainThisManyMembers",
-            () -> expectSystemToContainThisManyMembers(1));
-      } finally {
-        loc1.invoke("stop locator", LocatorDUnitTest::stopLocator);
-      }
-    }
+    vm1.invoke("expectSystemToContainThisManyMembers",
+        () -> expectSystemToContainThisManyMembers(1));
   }
 
   /**
@@ -533,70 +516,56 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testLeadMemberSelection() throws Exception {
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM vm3 = VM.getVM(3);
-
-    port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    final String locators = NetworkUtils.getServerHostName() + "[" + port1 + "]";
-    final Properties properties = getBasicProperties(locators);
-    properties.put(ENABLE_NETWORK_PARTITION_DETECTION, "true");
-    properties.put(DISABLE_AUTO_RECONNECT, "true");
+    String locators = hostName + "[" + port1 + "]";
 
+    Properties properties = getBasicProperties(locators);
+    properties.setProperty(DISABLE_AUTO_RECONNECT, "true");
+    properties.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "true");
     addDSProps(properties);
-    File logFile = new File("");
-    if (logFile.exists()) {
-      assertThat(logFile.delete()).isTrue();
-    }
-    Locator locator = Locator.startLocatorAndDS(port1, logFile, properties);
-    try {
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-      assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
 
-      // connect three vms and then watch the lead member selection as they
-      // are disconnected/reconnected
-      properties.put("name", "vm1");
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-      DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
+    assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
 
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
+    // connect three vms and then watch the lead member selection as they
+    // are disconnected/reconnected
+    properties.setProperty("name", "vm1");
 
-      properties.put("name", "vm2");
-      DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
+    DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
 
-      properties.put("name", "vm3");
-      DistributedMember mem3 = vm3.invoke(() -> getDistributedMember(properties));
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
 
-      // after disconnecting the first vm, the second one should become the leader
-      vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem1);
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem2, system);
+    properties.setProperty("name", "vm2");
+    DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
 
-      properties.put("name", "vm1");
-      mem1 = vm1.invoke(() -> getDistributedMember(properties));
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem2, system);
+    properties.setProperty("name", "vm3");
+    DistributedMember mem3 = vm3.invoke(() -> getDistributedMember(properties));
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
 
-      vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem2);
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem3, system);
+    // after disconnecting the first vm, the second one should become the leader
+    vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem1);
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem2, system);
 
-      vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem1);
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem3, system);
+    properties.setProperty("name", "vm1");
+    mem1 = vm1.invoke(() -> getDistributedMember(properties));
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem2, system);
 
-      vm3.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem3);
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(null, system);
+    vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem2);
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem3, system);
 
-    } finally {
-      locator.stop();
-    }
-  }
+    vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem1);
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem3, system);
 
+    vm3.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    MembershipManagerHelper.getMembershipManager(system).waitForDeparture(mem3);
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(null, system);
+  }
 
   /**
    * test lead member and coordinator failure with network partition detection enabled. It would be
@@ -615,98 +584,70 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testLeadAndCoordFailure() throws Exception {
-    IgnoredException.addIgnoredException("Possible loss of quorum due");
-
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM locatorVM = VM.getVM(3);
-    Locator locator = null;
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "true");
+    addIgnoredException("Possible loss of quorum due");
 
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = getClusterProperties(locators, "true");
     addDSProps(properties);
-    try {
-      File logFile = new File("");
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      system.getLogWriter()
-          .info("<ExpectedException action=add>java.net.ConnectException</ExpectedException>");
-      MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
-      startLocator(locatorVM, properties, port2);
 
-      assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-      // properties.put("log-level", getDUnitLogLevel());
+    addIgnoredException(ConnectException.class);
+    MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
+    startLocator(vm3, properties, port2);
 
-      DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
-      vm2.invoke(() -> getDistributedMember(properties));
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
+    assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
 
-      assertThat(system.getDistributedMember())
-          .isEqualTo(MembershipManagerHelper.getCoordinator(system));
+    DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
+    vm2.invoke(() -> getDistributedMember(properties));
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(mem1, system);
 
-      // crash the second vm and the locator. Should be okay
-      DistributedTestUtils.crashDistributedSystem(vm2);
-      locatorVM.invoke(() -> {
-        Locator loc = Locator.getLocator();
-        MembershipManagerHelper.crashDistributedSystem(loc.getDistributedSystem());
-        loc.stop();
-      });
+    assertThat(system.getDistributedMember())
+        .isEqualTo(MembershipManagerHelper.getCoordinator(system));
 
-      assertThat(vm1.invoke(LocatorDUnitTest::isSystemConnected))
-          .describedAs("Distributed system should not have disconnected").isTrue();
+    // crash the second vm and the locator. Should be okay
+    DistributedTestUtils.crashDistributedSystem(vm2);
+    vm3.invoke(() -> {
+      Locator loc = Locator.getLocator();
+      MembershipManagerHelper.crashDistributedSystem(loc.getDistributedSystem());
+      loc.stop();
+    });
 
-      // ensure quorumLost is properly invoked
-      ClusterDistributionManager dm =
-          (ClusterDistributionManager) ((InternalDistributedSystem) system)
-              .getDistributionManager();
-      MyMembershipListener listener = new MyMembershipListener();
-      dm.addMembershipListener(listener);
-      // ensure there is an unordered reader thread for the member
-      new HighPriorityAckedMessage().send(Collections.singleton(mem1), false);
+    assertThat(vm1.invoke(LocatorDUnitTest::isSystemConnected))
+        .describedAs("Distributed system should not have disconnected").isTrue();
 
-      // disconnect the first vm and demonstrate that the third vm and the
-      // locator notice the failure and exit
-      DistributedTestUtils.crashDistributedSystem(vm1);
+    // ensure quorumLost is properly invoked
+    DistributionManager dm = system.getDistributionManager();
+    MyMembershipListener listener = new MyMembershipListener();
+    dm.addMembershipListener(listener);
+    // ensure there is an unordered reader thread for the member
+    new HighPriorityAckedMessage().send(Collections.singleton(mem1), false);
 
-      /*
-       * This vm is watching vm1, which is watching vm2 which is watching locatorVM. It will take 3
-       * * (3
-       * * member-timeout) milliseconds to detect the full failure and eject the lost members from
-       * the view.
-       */
+    // disconnect the first vm and demonstrate that the third vm and the
+    // locator notice the failure and exit
+    DistributedTestUtils.crashDistributedSystem(vm1);
 
-      logger.info("waiting for my distributed system to disconnect due to partition detection");
+    /*
+     * This vm is watching vm1, which is watching vm2, which is watching the locator VM (vm3). It
+     * will take 3 * (3 * member-timeout) milliseconds to detect the full failure and eject the
+     * lost members from the view.
+     */
 
-      await().until(() -> !system.isConnected());
+    logger.info("waiting for my distributed system to disconnect due to partition detection");
 
-      if (system.isConnected()) {
-        fail(
-            "Distributed system did not disconnect as expected - network partition detection is broken");
-      }
-      // quorumLost should be invoked if we get a ForcedDisconnect in this situation
-      assertThat(listener.quorumLostInvoked).describedAs("expected quorumLost to be invoked")
-          .isTrue();
-      assertThat(listener.suspectReasons.contains(Connection.INITIATING_SUSPECT_PROCESSING))
-          .describedAs("expected suspect processing initiated by TCPConduit").isTrue();
-    } finally {
-      if (locator != null) {
-        locator.stop();
-      }
-      LogWriter bLogger = new LocalLogWriter(ALL.intLevel(), System.out);
-      bLogger.info("<ExpectedException action=remove>service failure</ExpectedException>");
-      bLogger
-          .info("<ExpectedException action=remove>java.net.ConnectException</ExpectedException>");
-      bLogger.info(
-          "<ExpectedException action=remove>org.apache.geode.ForcedDisconnectException</ExpectedException>");
+    await().until(() -> !system.isConnected());
+
+    if (system.isConnected()) {
+      fail(
+          "Distributed system did not disconnect as expected - network partition detection is broken");
     }
+    // quorumLost should be invoked if we get a ForcedDisconnect in this situation
+    assertThat(listener.quorumLostInvoked).describedAs("expected quorumLost to be invoked")
+        .isTrue();
+    assertThat(listener.suspectReasons.contains(Connection.INITIATING_SUSPECT_PROCESSING))
+        .describedAs("expected suspect processing initiated by TCPConduit").isTrue();
   }
 
   /**
@@ -722,96 +663,61 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testLeadFailureAndCoordShutdown() throws Exception {
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM locatorVM = VM.getVM(3);
-    Locator locator = null;
-
-    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    this.port2 = port2;
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "true");
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
 
+    Properties properties = getClusterProperties(locators, "true");
     addDSProps(properties);
 
-    try {
-      File logFile = new File("");
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      locatorVM.invoke(() -> {
-        File lf = new File("");
-        try {
-          Locator loc = Locator.startLocatorAndDS(port2, lf, properties);
-          system = (InternalDistributedSystem) loc.getDistributedSystem();
-          MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
-        } catch (IOException ios) {
-          throw new RuntimeException("Unable to start locator2", ios);
-        }
-      });
-
-      assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-      DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
-      DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
+    vm3.invoke(() -> {
+      Locator loc = Locator.startLocatorAndDS(port2, null, properties);
+      system = (InternalDistributedSystem) loc.getDistributedSystem();
+      MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
+    });
 
-      assertThat(mem1).isEqualTo(MembershipManagerHelper.getLeadMember(system));
+    assertThat(MembershipManagerHelper.getLeadMember(system)).isNull();
 
-      assertThat(system.getDistributedMember())
-          .isEqualTo(MembershipManagerHelper.getCoordinator(system));
+    DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
+    DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
 
-      MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
+    assertThat(mem1).isEqualTo(MembershipManagerHelper.getLeadMember(system));
 
-      // crash the lead vm. Should be okay
-      vm1.invoke(() -> {
-        LogWriter logWriter = LocatorDUnitTest.system.getLogWriter();
-        assertThat(logWriter).isNotNull();
-        logWriter
-            .info("<ExpectedException action=add>service failure</ExpectedException>");
-        logWriter.info(
-            "<ExpectedException action=add>org.apache.geode.ConnectException</ExpectedException>");
-        logWriter.info(
-            "<ExpectedException action=add>org.apache.geode.ForcedDisconnectException</ExpectedException>");
-        MembershipManagerHelper.crashDistributedSystem(LocatorDUnitTest.system);
-      });
-
-      waitUntilTheSystemIsConnected(vm2, locatorVM);
-      // stop the locator normally. This should also be okay
-      locator.stop();
+    assertThat(system.getDistributedMember())
+        .isEqualTo(MembershipManagerHelper.getCoordinator(system));
 
-      await()
-          .until(() -> {
-            assertThat(Locator.getLocator()).describedAs("locator is not stopped").isNull();
-            return true;
-          });
+    MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
 
-      checkSystemConnectedInVMs(vm2, locatorVM);
+    // crash the lead vm. Should be okay
+    vm1.invoke(() -> {
+      addIgnoredException("service failure");
+      addIgnoredException(ForcedDisconnectException.class);
+      MembershipManagerHelper.crashDistributedSystem(system);
+    });
 
-      // the remaining non-locator member should now be the lead member
-      assertEquals(
-          "This test sometimes fails.  If the log contains "
-              + "'failed to collect all ACKs' it is a false failure.",
-          mem2, vm2.invoke(LocatorDUnitTest::getLeadMember));
+    waitUntilTheSystemIsConnected(vm2, vm3);
+    // stop the locator normally. This should also be okay
+    locator.stop();
 
-      // disconnect the first vm and demonstrate that the third vm and the
-      // locator notice the failure and exit
-      vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      locatorVM.invoke(LocatorDUnitTest::stopLocator);
-    } finally {
-      MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
-      if (locator != null) {
-        locator.stop();
-      }
-      try {
-        locatorVM.invoke(LocatorDUnitTest::stopLocator);
-      } catch (Exception e) {
-        logger.error("failed to stop locator in vm 3", e);
-      }
-    }
+    await()
+        .until(() -> {
+          assertThat(Locator.getLocator()).describedAs("locator is not stopped").isNull();
+          return true;
+        });
+
+    checkSystemConnectedInVMs(vm2, vm3);
+
+    // the remaining non-locator member should now be the lead member
+    assertEquals(
+        "This test sometimes fails.  If the log contains "
+            + "'failed to collect all ACKs' it is a false failure.",
+        mem2, vm2.invoke(LocatorDUnitTest::getLeadMember));
+
+    // disconnect the first vm and demonstrate that the third vm and the
+    // locator notice the failure and exit
+    vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    vm3.invoke(LocatorDUnitTest::stopLocator);
   }
 
   /**
@@ -825,128 +731,84 @@ public class LocatorDUnitTest implements java.io.Serializable {
    * We then shut down the group coordinator and observe the second locator pick up the job and the
    * remaining member continues to operate normally.
    */
-  // disabled on trunk - should be re-enabled on cedar_dev_Oct12
-  // this test leaves a CloserThread around forever that logs "pausing" messages every 500 ms
   @Test
   public void testForceDisconnectAndPeerShutdownCause() throws Exception {
-    Host host = Host.getHost(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM locvm = VM.getVM(3);
-    Locator locator = null;
-
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
-    final String host0 = NetworkUtils.getServerHostName(host);
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-
-    final Properties properties = new Properties();
-    properties.put(MCAST_PORT, "0");
-    properties.put(LOCATORS, locators);
-    properties.put(ENABLE_NETWORK_PARTITION_DETECTION, "true");
-    properties.put(DISABLE_AUTO_RECONNECT, "true");
-    properties.put(MEMBER_TIMEOUT, "2000");
-    properties.put(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
-
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties properties = new Properties();
+    properties.setProperty(MCAST_PORT, "0");
+    properties.setProperty(LOCATORS, locators);
+    properties.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "true");
+    properties.setProperty(DISABLE_AUTO_RECONNECT, "true");
+    properties.setProperty(MEMBER_TIMEOUT, "2000");
     addDSProps(properties);
 
-    try {
-      final String uname = "testForceDisconnectAndPeerShutdownCause";
-      File logFile = new File("");
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      locvm.invoke(() -> {
-        File lf = new File("");
-        try {
-          Locator loc = Locator.startLocatorAndDS(port2, lf, properties);
-          system = (InternalDistributedSystem) loc.getDistributedSystem();
-        } catch (IOException ios) {
-          throw new RuntimeException("Unable to start locator2", ios);
-        }
-      });
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-      SerializableRunnable crashSystem = new SerializableRunnable("Crash system") {
-        @Override
-        public void run() {
-          DistributedSystem msys = LocatorDUnitTest.system;
-          msys.getLogWriter()
-              .info("<ExpectedException action=add>service failure</ExpectedException>");
-          msys.getLogWriter().info(
-              "<ExpectedException action=add>org.apache.geode.ConnectException</ExpectedException>");
-          msys.getLogWriter().info(
-              "<ExpectedException action=add>org.apache.geode.ForcedDisconnectException</ExpectedException>");
-          msys.getLogWriter()
-              .info("<ExpectedException action=add>Possible loss of quorum</ExpectedException>");
-          hook = new TestHook();
-          MembershipManagerHelper.getMembershipManager(msys).registerTestHook(hook);
-          try {
-            MembershipManagerHelper.crashDistributedSystem(msys);
-          } finally {
-            hook.reset();
-          }
-        }
-      };
+    vm3.invoke(() -> {
+      Locator loc = Locator.startLocatorAndDS(port2, null, properties);
+      system = (InternalDistributedSystem) loc.getDistributedSystem();
+    });
 
-      assertNull(MembershipManagerHelper.getLeadMember(system));
+    SerializableRunnable crashSystem = new SerializableRunnable("Crash system") {
+      @Override
+      public void run() {
+        addIgnoredException("service failure");
+        addIgnoredException("Possible loss of quorum");
+        addIgnoredException(ForcedDisconnectException.class);
 
-      final DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
-      final DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
+        hook = new TestHook();
+        MembershipManagerHelper.getMembershipManager(system).registerTestHook(hook);
+        try {
+          MembershipManagerHelper.crashDistributedSystem(system);
+        } finally {
+          hook.reset();
+        }
+      }
+    };
 
-      assertEquals(mem1, MembershipManagerHelper.getLeadMember(system));
+    assertNull(MembershipManagerHelper.getLeadMember(system));
 
-      assertEquals(system.getDistributedMember(), MembershipManagerHelper.getCoordinator(system));
+    DistributedMember mem1 = vm1.invoke(() -> getDistributedMember(properties));
+    DistributedMember mem2 = vm2.invoke(() -> getDistributedMember(properties));
 
-      assertTrue("Distributed system should not have disconnected", isSystemConnected());
+    assertEquals(mem1, MembershipManagerHelper.getLeadMember(system));
 
-      assertTrue("Distributed system should not have disconnected",
-          vm2.invoke(() -> LocatorDUnitTest.isSystemConnected()));
+    assertEquals(system.getDistributedMember(), MembershipManagerHelper.getCoordinator(system));
 
-      assertTrue("Distributed system should not have disconnected",
-          locvm.invoke(() -> LocatorDUnitTest.isSystemConnected()));
+    assertTrue("Distributed system should not have disconnected", isSystemConnected());
 
-      vm2.invokeAsync(crashSystem);
+    assertTrue("Distributed system should not have disconnected",
+        vm2.invoke(() -> isSystemConnected()));
 
-      Wait.pause(1000); // 4 x the member-timeout
+    assertTrue("Distributed system should not have disconnected",
+        vm3.invoke(() -> isSystemConnected()));
 
+    vm2.invokeAsync(crashSystem);
 
-      // request member removal for first peer from second peer.
-      vm2.invoke(new SerializableRunnable("Request Member Removal") {
+    Wait.pause(1000); // give the asynchronous crash time to start (member-timeout is 2000 ms)
 
-        @Override
-        public void run() {
-          DistributedSystem msys = LocatorDUnitTest.system;
-          MembershipManager mmgr = MembershipManagerHelper.getMembershipManager(msys);
+    // request member removal for first peer from second peer.
+    vm2.invoke(new SerializableRunnable("Request Member Removal") {
 
-          // check for shutdown cause in MembershipManager. Following call should
-          // throw DistributedSystemDisconnectedException which should have cause as
-          // ForceDisconnectException.
-          try {
-            msys.getLogWriter().info(
-                "<ExpectedException action=add>Membership: requesting removal of </ExpectedException>");
-            mmgr.requestMemberRemoval(mem1, "test reasons");
-            msys.getLogWriter().info(
-                "<ExpectedException action=remove>Membership: requesting removal of </ExpectedException>");
-            fail("It should have thrown exception in requestMemberRemoval");
-          } catch (DistributedSystemDisconnectedException e) {
-            Throwable cause = e.getCause();
-            assertTrue("This should have been ForceDisconnectException but found " + cause,
-                cause instanceof ForcedDisconnectException);
-          } finally {
-            hook.reset();
-          }
+      @Override
+      public void run() {
+        MembershipManager mmgr = MembershipManagerHelper.getMembershipManager(system);
+
+        // check for shutdown cause in MembershipManager. Following call should
+        // throw DistributedSystemDisconnectedException which should have cause as
+        // ForceDisconnectException.
+        try (IgnoredException i = addIgnoredException("Membership: requesting removal of")) {
+          mmgr.requestMemberRemoval(mem1, "test reasons");
+          fail("It should have thrown exception in requestMemberRemoval");
+        } catch (DistributedSystemDisconnectedException e) {
+          assertThat(e).hasRootCauseInstanceOf(ForcedDisconnectException.class);
+        } finally {
+          hook.reset();
         }
-      });
-
-    } finally {
-      if (locator != null) {
-        locator.stop();
       }
-      locvm.invoke(() -> stopLocator());
-      assertTrue("locator is not stopped", Locator.getLocators().isEmpty());
-    }
+    });
   }
 
   /**
@@ -959,148 +821,110 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testLeadShutdownAndCoordFailure() throws Exception {
-    VM memberThatWillBeShutdownVM = VM.getVM(1);
-    VM memberVM = VM.getVM(2);
-    VM locatorThatWillBeShutdownVM = VM.getVM(3);
-    Locator locator = null;
-    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    this.port1 = port1;
-    final int port2 = ports[1];
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-    final Properties properties = getClusterProperties(locators, "true");
+    VM memberThatWillBeShutdownVM = vm1;
+    VM memberVM = vm2;
+    VM locatorThatWillBeShutdownVM = vm3;
 
-    addDSProps(properties);
-    try {
-      locatorThatWillBeShutdownVM.invoke(() -> {
-        Locator localLocator;
-        try {
-          localLocator = Locator.startLocatorAndDS(port2, new File(""), properties);
-          system = (InternalDistributedSystem) localLocator.getDistributedSystem();
-          assertThat(localLocator.getDistributedSystem().isConnected()).isTrue();
-        } catch (IOException ios) {
-          throw new RuntimeException("Unable to start locator1", ios);
-        }
-      });
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
 
-      // Test runner will be locator 2
-      locator = Locator.startLocatorAndDS(port1, new File(""), properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      assertThat(locator.getDistributedSystem().isConnected()).isTrue();
-      DistributedSystem testRunnerLocatorDS = locator.getDistributedSystem();
-      testRunnerLocatorDS.getLogWriter().info(
-          "<ExpectedException action=add>org.apache.geode.ForcedDisconnectException</ExpectedException>");
-      assertThat(MembershipManagerHelper.getLeadMember(testRunnerLocatorDS))
-          .describedAs("There was a lead member when there should not be.").isNull();
-
-      DistributedMember distributedMemberThatWillBeShutdown =
-          memberThatWillBeShutdownVM.invoke(() -> getDistributedMember(properties));
-      memberThatWillBeShutdownVM
-          .invoke(() -> MembershipManagerHelper.inhibitForcedDisconnectLogging(true));
-
-      DistributedMember distributedMember = memberVM.invoke(() -> getDistributedMember(properties));
-
-      DistributedMember locatorMemberToBeShutdown =
-          locatorThatWillBeShutdownVM.invoke(LocatorDUnitTest::getLocatorDistributedMember);
-
-      waitForMemberToBecomeLeadMemberOfDistributedSystem(distributedMemberThatWillBeShutdown,
-          testRunnerLocatorDS);
-      DistributedMember oldLeader = MembershipManagerHelper.getLeadMember(testRunnerLocatorDS);
-
-      assertThat(locatorMemberToBeShutdown)
-          .isEqualTo(MembershipManagerHelper.getCoordinator(testRunnerLocatorDS));
-      DistributedMember oldCoordinator =
-          MembershipManagerHelper.getCoordinator(testRunnerLocatorDS);
-
-      // crash the lead locator. Should be okay
-      locatorThatWillBeShutdownVM.invoke("crash locator", () -> {
-        Locator loc = Locator.getLocator();
-        DistributedSystem distributedSystem = loc.getDistributedSystem();
-        LogWriter logWriter = distributedSystem.getLogWriter();
-        assertThat(logWriter).isNotNull();
-        logWriter
-            .info("<ExpectedException action=add>service failure</ExpectedException>");
-        logWriter.info(
-            "<ExpectedException action=add>org.apache.geode.ForcedDisconnectException</ExpectedException>");
-        logWriter.info(
-            "<ExpectedException action=add>org.apache.geode.ConnectException</ExpectedException>");
-        MembershipManagerHelper.crashDistributedSystem(distributedSystem);
-        loc.stop();
-      });
+    Properties properties = getClusterProperties(locators, "true");
+    addDSProps(properties);
 
-      await().until(testRunnerLocatorDS::isConnected);
+    locatorThatWillBeShutdownVM.invoke(() -> {
+      Locator localLocator = Locator.startLocatorAndDS(port2, null, properties);
+      system = (InternalDistributedSystem) localLocator.getDistributedSystem();
+      assertThat(localLocator.getDistributedSystem().isConnected()).isTrue();
+    });
 
-      waitUntilTheSystemIsConnected(memberThatWillBeShutdownVM, memberVM);
+    // Test runner will be locator 2
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
+    assertThat(locator.getDistributedSystem().isConnected()).isTrue();
+    DistributedSystem testRunnerLocatorDS = locator.getDistributedSystem();
+    addIgnoredException(ForcedDisconnectException.class);
+    assertThat(MembershipManagerHelper.getLeadMember(testRunnerLocatorDS))
+        .describedAs("There was a lead member when there should not be.").isNull();
+
+    DistributedMember distributedMemberThatWillBeShutdown =
+        memberThatWillBeShutdownVM.invoke(() -> getDistributedMember(properties));
+    memberThatWillBeShutdownVM
+        .invoke(() -> MembershipManagerHelper.inhibitForcedDisconnectLogging(true));
+
+    DistributedMember distributedMember = memberVM.invoke(() -> getDistributedMember(properties));
+
+    DistributedMember locatorMemberToBeShutdown =
+        locatorThatWillBeShutdownVM.invoke(LocatorDUnitTest::getLocatorDistributedMember);
+
+    waitForMemberToBecomeLeadMemberOfDistributedSystem(distributedMemberThatWillBeShutdown,
+        testRunnerLocatorDS);
+    DistributedMember oldLeader = MembershipManagerHelper.getLeadMember(testRunnerLocatorDS);
+
+    assertThat(locatorMemberToBeShutdown)
+        .isEqualTo(MembershipManagerHelper.getCoordinator(testRunnerLocatorDS));
+    DistributedMember oldCoordinator =
+        MembershipManagerHelper.getCoordinator(testRunnerLocatorDS);
+
+    // crash the lead locator. Should be okay
+    locatorThatWillBeShutdownVM.invoke("crash locator", () -> {
+      Locator loc = Locator.getLocator();
+      DistributedSystem distributedSystem = loc.getDistributedSystem();
+      addIgnoredException("service failure");
+      addIgnoredException(ForcedDisconnectException.class);
+      MembershipManagerHelper.crashDistributedSystem(distributedSystem);
+      loc.stop();
+    });
 
-      // disconnect the first vm and demonstrate that the non-lead vm and the
-      // locator notice the failure and continue to run
-      memberThatWillBeShutdownVM.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      await().until(
-          () -> memberThatWillBeShutdownVM.invoke(() -> !LocatorDUnitTest.isSystemConnected()));
-      await().until(() -> memberVM.invoke(LocatorDUnitTest::isSystemConnected));
+    await().until(testRunnerLocatorDS::isConnected);
 
-      assertThat(memberVM.invoke(LocatorDUnitTest::isSystemConnected))
-          .describedAs("Distributed system should not have disconnected").isTrue();
+    waitUntilTheSystemIsConnected(memberThatWillBeShutdownVM, memberVM);
 
-      await("waiting for the old coordinator to drop out").until(
-          () -> MembershipManagerHelper.getCoordinator(testRunnerLocatorDS) != oldCoordinator);
+    // disconnect the first vm and demonstrate that the non-lead vm and the
+    // locator notice the failure and continue to run
+    memberThatWillBeShutdownVM.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    await().until(
+        () -> memberThatWillBeShutdownVM.invoke(() -> !isSystemConnected()));
+    await().until(() -> memberVM.invoke(LocatorDUnitTest::isSystemConnected));
 
-      await().until(() -> {
-        DistributedMember survivingDistributedMember = testRunnerLocatorDS.getDistributedMember();
-        DistributedMember coordinator = MembershipManagerHelper.getCoordinator(testRunnerLocatorDS);
-        assertThat(survivingDistributedMember).isEqualTo(coordinator);
-        return true;
-      });
+    assertThat(memberVM.invoke(LocatorDUnitTest::isSystemConnected))
+        .describedAs("Distributed system should not have disconnected").isTrue();
 
-      await("Waiting for the old leader to drop out")
-          .pollInterval(1, TimeUnit.SECONDS).until(() -> {
-            DistributedMember leader = MembershipManagerHelper.getLeadMember(testRunnerLocatorDS);
-            return leader != oldLeader;
-          });
+    await("waiting for the old coordinator to drop out").until(
+        () -> MembershipManagerHelper.getCoordinator(testRunnerLocatorDS) != oldCoordinator);
 
-      await().until(() -> {
-        assertThat(distributedMember)
-            .isEqualTo(MembershipManagerHelper.getLeadMember(testRunnerLocatorDS));
-        return true;
-      });
+    await().untilAsserted(() -> {
+      DistributedMember survivingDistributedMember = testRunnerLocatorDS.getDistributedMember();
+      DistributedMember coordinator = MembershipManagerHelper.getCoordinator(testRunnerLocatorDS);
+      assertThat(survivingDistributedMember).isEqualTo(coordinator);
+    });
 
-    } finally {
-      memberVM.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    await("Waiting for the old leader to drop out")
+        .pollInterval(1, SECONDS).until(() -> {
+          DistributedMember leader = MembershipManagerHelper.getLeadMember(testRunnerLocatorDS);
+          return leader != oldLeader;
+        });
 
-      if (locator != null) {
-        locator.stop();
-      }
-      locatorThatWillBeShutdownVM.invoke(LocatorDUnitTest::stopLocator);
-    }
+    await().untilAsserted(() -> {
+      assertThat(distributedMember)
+          .isEqualTo(MembershipManagerHelper.getLeadMember(testRunnerLocatorDS));
+    });
   }
 
-
   /**
    * Tests that attempting to connect to a distributed system in which no locator is defined throws
    * an exception.
    */
   @Test
   public void testNoLocator() {
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    String locators = NetworkUtils.getServerHostName() + "[" + port + "]";
-    Properties props = getBasicProperties(locators);
+    String locators = hostName + "[" + port2 + "]";
 
+    Properties props = getBasicProperties(locators);
     addDSProps(props);
-    final String expected = "java.net.ConnectException";
-    final String addExpected = "<ExpectedException action=add>" + expected + "</ExpectedException>";
-    final String removeExpected =
-        "<ExpectedException action=remove>" + expected + "</ExpectedException>";
 
-    LogWriter bgexecLogger = new LocalLogWriter(ALL.intLevel(), System.out);
-    bgexecLogger.info(addExpected);
+    addIgnoredException(ConnectException.class);
 
-    boolean exceptionOccurred = true;
     try {
       getConnectedDistributedSystem(props);
-      exceptionOccurred = false;
+      fail("Should have thrown a GemFireConfigException");
 
     } catch (DistributionException ex) {
       // I guess it can throw this too...
@@ -1108,17 +932,6 @@ public class LocatorDUnitTest implements java.io.Serializable {
     } catch (GemFireConfigException ex) {
       String s = ex.getMessage();
       assertThat(s.contains("Locator does not exist")).isTrue();
-
-    } catch (Exception ex) {
-      // if you see this fail, determine if unexpected exception is expected
-      // if expected then add in a catch block for it above this catch
-      throw new RuntimeException("Failed with unexpected exception", ex);
-    } finally {
-      bgexecLogger.info(removeExpected);
-    }
-
-    if (!exceptionOccurred) {
-      fail("Should have thrown a GemFireConfigException");
     }
   }
 
@@ -1131,62 +944,46 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testOneLocator() {
-    VM vm0 = VM.getVM(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-
-    final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    final String locators = NetworkUtils.getServerHostName() + "[" + port + "]";
+    String locators = hostName + "[" + port2 + "]";
 
-    startLocatorWithSomeBasicProperties(vm0, port);
+    startLocatorWithSomeBasicProperties(vm0, port2);
 
-    try {
-      SerializableRunnable connect = new SerializableRunnable("Connect to " + locators) {
-        @Override
-        public void run() {
-          Properties props = getBasicProperties(locators);
-          props.setProperty(MEMBER_TIMEOUT, "1000");
-          addDSProps(props);
-          getConnectedDistributedSystem(props);
-        }
-      };
+    SerializableRunnable connect = new SerializableRunnable("Connect to " + locators) {
+      @Override
+      public void run() {
+        Properties props = getBasicProperties(locators);
+        props.setProperty(MEMBER_TIMEOUT, "1000");
+        addDSProps(props);
 
-      vm1.invoke(connect);
-      vm2.invoke(connect);
+        getConnectedDistributedSystem(props);
+      }
+    };
 
-      Properties props = getBasicProperties(locators);
-      props.setProperty(MEMBER_TIMEOUT, "1000");
-      addDSProps(props);
-      system = getConnectedDistributedSystem(props);
+    vm1.invoke(connect);
+    vm2.invoke(connect);
 
-      final DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-      logger.info("coordinator before termination of locator is " + coord);
+    Properties props = getBasicProperties(locators);
+    props.setProperty(MEMBER_TIMEOUT, "1000");
+    addDSProps(props);
 
-      vm0.invoke(LocatorDUnitTest::stopLocator);
+    system = getConnectedDistributedSystem(props);
 
-      // now ensure that one of the remaining members became the coordinator
+    DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
+    logger.info("coordinator before termination of locator is " + coord);
 
-      await()
-          .until(() -> !coord.equals(MembershipManagerHelper.getCoordinator(system)));
+    vm0.invoke(LocatorDUnitTest::stopLocator);
 
-      DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system);
-      logger.info("coordinator after shutdown of locator was " + newCoord);
-      if (coord.equals(newCoord)) {
-        fail("another member should have become coordinator after the locator was stopped");
-      }
+    // now ensure that one of the remaining members became the coordinator
 
-      system.disconnect();
+    await().until(() -> !coord.equals(MembershipManagerHelper.getCoordinator(system)));
 
-      vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-
-    } finally {
-      vm0.invoke(LocatorDUnitTest::stopLocator);
+    DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system);
+    logger.info("coordinator after shutdown of locator was " + newCoord);
+    if (coord.equals(newCoord)) {
+      fail("another member should have become coordinator after the locator was stopped");
     }
   }
 
-
   /**
    * Tests starting one locator in a remote VM and having multiple members of the distributed system
    * join it. This ensures that members start up okay, and that handling of a stopped locator is
@@ -1195,82 +992,60 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testLocatorBecomesCoordinator() {
-    final String expected = "java.net.ConnectException";
-    final String addExpected = "<ExpectedException action=add>" + expected + "</ExpectedException>";
-    final String removeExpected =
-        "<ExpectedException action=remove>" + expected + "</ExpectedException>";
+    addIgnoredException(ConnectException.class);
 
-    VM vm0 = VM.getVM(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
+    String locators = hostName + "[" + port2 + "]";
 
-    final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    final String locators = NetworkUtils.getServerHostName() + "[" + port + "]";
+    startLocatorPreferredCoordinators(vm0, port2);
 
-    startLocatorPreferredCoordinators(vm0, port);
-
-    try {
-
-      final Properties props = new Properties();
-      props.setProperty(LOCATORS, locators);
-      props.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "true");
+    Properties props = new Properties();
+    props.setProperty(LOCATORS, locators);
+    props.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "true");
+    addDSProps(props);
 
-      addDSProps(props);
-      vm1.invoke(() -> {
-        DistributedSystem sys = getSystem(props);
-        sys.getLogWriter().info(addExpected);
-      });
-      vm2.invoke(() -> {
-        DistributedSystem sys = getSystem(props);
-        sys.getLogWriter().info(addExpected);
-      });
+    vm1.invoke(() -> {
+      getSystem(props);
+    });
+    vm2.invoke(() -> {
+      getSystem(props);
+    });
 
-      system = getSystem(props);
+    system = getSystem(props);
 
-      final DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-      logger.info("coordinator before termination of locator is " + coord);
+    DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
+    logger.info("coordinator before termination of locator is " + coord);
 
-      vm0.invoke(LocatorDUnitTest::stopLocator);
+    vm0.invoke(LocatorDUnitTest::stopLocator);
 
-      // now ensure that one of the remaining members became the coordinator
-      await()
-          .until(() -> !coord.equals(MembershipManagerHelper.getCoordinator(system)));
+    // now ensure that one of the remaining members became the coordinator
+    await().until(() -> !coord.equals(MembershipManagerHelper.getCoordinator(system)));
 
-      DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system);
-      logger.info("coordinator after shutdown of locator was " + newCoord);
-      if (newCoord == null || coord.equals(newCoord)) {
-        fail("another member should have become coordinator after the locator was stopped: "
-            + newCoord);
-      }
+    DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system);
+    logger.info("coordinator after shutdown of locator was " + newCoord);
+    if (newCoord == null || coord.equals(newCoord)) {
+      fail("another member should have become coordinator after the locator was stopped: "
+          + newCoord);
+    }
 
-      // restart the locator to demonstrate reconnection & make disconnects faster
-      // it should also regain the role of coordinator, so we check to make sure
-      // that the coordinator has changed
-      startLocatorPreferredCoordinators(vm0, port);
+    // restart the locator to demonstrate reconnection & make disconnects faster
+    // it should also regain the role of coordinator, so we check to make sure
+    // that the coordinator has changed
+    startLocatorPreferredCoordinators(vm0, port2);
 
-      final DistributedMember tempCoord = newCoord;
+    DistributedMember tempCoord = newCoord;
 
-      await()
-          .until(() -> !tempCoord.equals(MembershipManagerHelper.getCoordinator(system)));
+    await().until(() -> !tempCoord.equals(MembershipManagerHelper.getCoordinator(system)));
 
-      system.disconnect();
-      LogWriter bgexecLogger = new LocalLogWriter(ALL.intLevel(), System.out);
-      bgexecLogger.info(removeExpected);
+    system.disconnect();
 
-      checkConnectionAndPrintInfo(vm1);
-      checkConnectionAndPrintInfo(vm2);
-      vm0.invoke(LocatorDUnitTest::stopLocator);
-    } finally {
-      vm0.invoke(LocatorDUnitTest::stopLocator);
-    }
+    checkConnectionAndPrintInfo(vm1);
+    checkConnectionAndPrintInfo(vm2);
   }
 
   @Test
   public void testConcurrentLocatorStartup() throws Exception {
     List<AvailablePort.Keeper> portKeepers =
         AvailablePortHelper.getRandomAvailableTCPPortKeepers(4);
-    List<AsyncInvocation<Object>> asyncInvocations = new ArrayList(portKeepers.size());
     StringBuilder sb = new StringBuilder(100);
     for (int i = 0; i < portKeepers.size(); i++) {
       AvailablePort.Keeper keeper = portKeepers.get(i);
@@ -1280,20 +1055,19 @@ public class LocatorDUnitTest implements java.io.Serializable {
       }
     }
     String locators = sb.toString();
+
     Properties dsProps = getClusterProperties(locators, "false");
+
+    List<AsyncInvocation<Object>> asyncInvocations = new ArrayList<>(portKeepers.size());
+
     for (int i = 0; i < portKeepers.size(); i++) {
       AvailablePort.Keeper keeper = portKeepers.get(i);
-      final int port = keeper.getPort();
-      DistributedTestUtils.deleteLocatorStateFile(port);
+      int port = keeper.getPort();
       keeper.release();
-      AsyncInvocation<Object> startLocator = VM.getVM(i).invokeAsync("start locator " + i, () -> {
+      AsyncInvocation<Object> startLocator = getVM(i).invokeAsync("start locator " + i, () -> {
         DUnitBlackboard blackboard = getBlackboard();
         blackboard.signalGate("" + port);
-        try {
-          blackboard.waitForGate("startLocators", 5, MINUTES);
-        } catch (InterruptedException e) {
-          throw new RuntimeException("test was interrupted");
-        }
+        blackboard.waitForGate("startLocators", 5, MINUTES);
         startLocatorBase(dsProps, port);
         assertTrue(isSystemConnected());
         System.out.println("Locator startup completed");
@@ -1308,15 +1082,9 @@ public class LocatorDUnitTest implements java.io.Serializable {
       asyncInvocations.get(i).await();
     }
     for (int i = 0; i < asyncInvocations.size(); i++) {
-      assertTrue(VM.getVM(i).invoke("assert all in same cluster", () -> CacheFactory
+      assertTrue(getVM(i).invoke("assert all in same cluster", () -> CacheFactory
           .getAnyInstance().getDistributedSystem().getAllOtherMembers().size() == expectedCount));
     }
-    for (int i = 0; i < asyncInvocations.size(); i++) {
-      VM.getVM(i).invoke(() -> {
-        Locator.getLocator().stop();
-        system = null;
-      });
-    }
   }
 
   /**
@@ -1324,63 +1092,37 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testTwoLocatorsTwoServers() {
-    VM vm0 = VM.getVM(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM vm3 = VM.getVM(3);
-
-    int[] freeTCPPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = freeTCPPorts[0];
-    this.port1 = port1;
-    final int port2 = freeTCPPorts[1];
-    this.port2 = port2;
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-
-    final Properties dsProps = getBasicProperties(locators);
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
+
+    Properties dsProps = getBasicProperties(locators);
     addDSProps(dsProps);
 
     startLocator(vm0, dsProps, port1);
-    try {
-      startLocator(vm3, dsProps, port2);
-      try {
-
-        SerializableRunnable connect = new SerializableRunnable("Connect to " + locators) {
-          @Override
-          public void run() {
-            Properties props = getBasicProperties(locators);
-            addDSProps(props);
-            getConnectedDistributedSystem(props);
-          }
-        };
-        vm1.invoke(connect);
-        vm2.invoke(connect);
 
-        Properties props = getBasicProperties(locators);
+    startLocator(vm3, dsProps, port2);
 
+    SerializableRunnable connect = new SerializableRunnable("Connect to " + locators) {
+      @Override
+      public void run() {
+        Properties props = getBasicProperties(locators);
         addDSProps(props);
-        system = getConnectedDistributedSystem(props);
-
-        await().until(() -> system.getDM().getViewMembers().size() >= 3);
+        getConnectedDistributedSystem(props);
+      }
+    };
+    vm1.invoke(connect);
+    vm2.invoke(connect);
 
-        // three applications plus
-        assertThat(system.getDM().getViewMembers().size()).isEqualTo(5);
+    Properties props = getBasicProperties(locators);
 
-        system.disconnect();
+    addDSProps(props);
+    system = getConnectedDistributedSystem(props);
 
-        vm1.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-        vm2.invoke(LocatorDUnitTest::disconnectDistributedSystem);
+    await().until(() -> system.getDM().getViewMembers().size() >= 3);
 
-      } finally {
-        vm3.invoke(LocatorDUnitTest::stopLocator);
-      }
-    } finally {
-      vm0.invoke(LocatorDUnitTest::stopLocator);
-    }
+    // three applications plus two locators
+    assertThat(system.getDM().getViewMembers().size()).isEqualTo(5);
   }
 
-
   private void waitUntilLocatorBecomesCoordinator() {
     await().until(() -> system != null && system.isConnected() &&
         getCoordinator()
@@ -1405,230 +1147,183 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testMultipleLocatorsRestartingAtSameTime() {
-    VM vm0 = VM.getVM(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM vm3 = VM.getVM(3);
-    VM vm4 = VM.getVM(4);
-
-    int[] freeTCPPorts = AvailablePortHelper.getRandomAvailableTCPPorts(3);
-    this.port1 = freeTCPPorts[0];
-    this.port2 = freeTCPPorts[1];
-    int port3 = freeTCPPorts[2];
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2, port3);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators =
-        host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]," + host0 + "[" + port3 + "]";
-
-    final Properties dsProps = getBasicProperties(locators);
+    String locators =
+        hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]," + hostName + "[" + port3
+            + "]";
+
+    Properties dsProps = getBasicProperties(locators);
     dsProps.setProperty(LOG_LEVEL, logger.getLevel().name());
     dsProps.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, "true");
-
     addDSProps(dsProps);
+
     startLocator(vm0, dsProps, port1);
     startLocator(vm1, dsProps, port2);
     startLocator(vm2, dsProps, port3);
 
-    try {
-      vm3.invoke(() -> {
-        getConnectedDistributedSystem(dsProps);
-        return true;
-      });
-      vm4.invoke(() -> {
-        getConnectedDistributedSystem(dsProps);
-        return true;
-      });
+    vm3.invoke(() -> {
+      getConnectedDistributedSystem(dsProps);
+    });
+    vm4.invoke(() -> {
+      getConnectedDistributedSystem(dsProps);
+    });
 
-      system = getConnectedDistributedSystem(dsProps);
-
-      await().until(() -> system.getDM().getViewMembers().size() == 6);
-
-      // three applications plus
-      assertThat(system.getDM().getViewMembers().size()).isEqualTo(6);
-
-      vm0.invoke(LocatorDUnitTest::stopLocator);
-      vm1.invoke(LocatorDUnitTest::stopLocator);
-      vm2.invoke(LocatorDUnitTest::stopLocator);
-
-      await()
-          .until(() -> system.getDM().getMembershipManager().getView().size() <= 3);
-
-      final String newLocators = host0 + "[" + port2 + "]," + host0 + "[" + port3 + "]";
-      dsProps.setProperty(LOCATORS, newLocators);
-
-      final InternalDistributedMember currentCoordinator = getCoordinator();
-      DistributedMember vm3ID = vm3.invoke(() -> system.getDM().getDistributionManagerId());
-      assertEquals(
-          "View is " + system.getDM().getMembershipManager().getView() + " and vm3's ID is "
-              + vm3ID,
-          vm3ID, vm3.invoke(
-              () -> system.getDistributionManager().getMembershipManager().getView().getCreator()));
-
-      startLocator(vm1, dsProps, port2);
-      startLocator(vm2, dsProps, port3);
-
-      await()
-          .until(() -> !getCoordinator().equals(currentCoordinator)
-              && system.getDM().getAllHostedLocators().size() == 2);
-
-      vm1.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-      vm2.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-      vm3.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-      vm4.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-
-      int netViewId = vm1.invoke("Checking ViewCreator", () -> getView().getViewId());
-      assertThat((int) vm2.invoke("checking ViewID", () -> getView().getViewId()))
-          .isEqualTo(netViewId);
-      assertThat((int) vm3.invoke("checking ViewID", () -> getView().getViewId()))
-          .isEqualTo(netViewId);
-      assertThat((int) vm4.invoke("checking ViewID", () -> getView().getViewId()))
-          .isEqualTo(netViewId);
-      assertThat((boolean) vm4
-          .invoke("Checking ViewCreator",
-              () -> system.getDistributedMember().equals(getView().getCreator()))).isFalse();
-      // Given the start up order of servers, this server is the elder server
-      assertFalse(vm3
-          .invoke("Checking ViewCreator",
-              () -> system.getDistributedMember().equals(getView().getCreator())));
-      if (vm1.invoke(() -> system.getDistributedMember().equals(getView().getCreator()))) {
-        assertThat((boolean) vm2.invoke("Checking ViewCreator",
-            () -> system.getDistributedMember().equals(getView().getCreator())))
-                .isFalse();
-      } else {
-        assertThat((boolean) vm2.invoke("Checking ViewCreator",
-            () -> system.getDistributedMember().equals(getView().getCreator())))
-                .isTrue();
-      }
+    system = getConnectedDistributedSystem(dsProps);
 
-    } finally {
-      system.disconnect();
-      vm3.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      vm4.invoke(LocatorDUnitTest::disconnectDistributedSystem);
-      vm2.invoke(LocatorDUnitTest::stopLocator);
-      vm1.invoke(LocatorDUnitTest::stopLocator);
+    await().until(() -> system.getDM().getViewMembers().size() == 6);
+
+    // three applications plus three locators
+    assertThat(system.getDM().getViewMembers().size()).isEqualTo(6);
+
+    vm0.invoke(LocatorDUnitTest::stopLocator);
+    vm1.invoke(LocatorDUnitTest::stopLocator);
+    vm2.invoke(LocatorDUnitTest::stopLocator);
+
+    await()
+        .until(() -> system.getDM().getMembershipManager().getView().size() <= 3);
+
+    String newLocators = hostName + "[" + port2 + "]," + hostName + "[" + port3 + "]";
+    dsProps.setProperty(LOCATORS, newLocators);
+
+    InternalDistributedMember currentCoordinator = getCoordinator();
+    DistributedMember vm3ID = vm3.invoke(() -> system.getDM().getDistributionManagerId());
+    assertEquals(
+        "View is " + system.getDM().getMembershipManager().getView() + " and vm3's ID is "
+            + vm3ID,
+        vm3ID, vm3.invoke(
+            () -> system.getDistributionManager().getMembershipManager().getView().getCreator()));
+
+    startLocator(vm1, dsProps, port2);
+    startLocator(vm2, dsProps, port3);
+
+    await()
+        .until(() -> !getCoordinator().equals(currentCoordinator)
+            && system.getDM().getAllHostedLocators().size() == 2);
+
+    vm1.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
+    vm2.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
+    vm3.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
+    vm4.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
+
+    int netViewId = vm1.invoke("Checking ViewCreator", () -> getView().getViewId());
+    assertThat((int) vm2.invoke("checking ViewID", () -> getView().getViewId()))
+        .isEqualTo(netViewId);
+    assertThat((int) vm3.invoke("checking ViewID", () -> getView().getViewId()))
+        .isEqualTo(netViewId);
+    assertThat((int) vm4.invoke("checking ViewID", () -> getView().getViewId()))
+        .isEqualTo(netViewId);
+    assertThat((boolean) vm4
+        .invoke("Checking ViewCreator",
+            () -> system.getDistributedMember().equals(getView().getCreator()))).isFalse();
+    // Given the startup order of servers, this server is the elder server
+    assertFalse(vm3
+        .invoke("Checking ViewCreator",
+            () -> system.getDistributedMember().equals(getView().getCreator())));
+    if (vm1.invoke(() -> system.getDistributedMember().equals(getView().getCreator()))) {
+      assertThat((boolean) vm2.invoke("Checking ViewCreator",
+          () -> system.getDistributedMember().equals(getView().getCreator())))
+              .isFalse();
+    } else {
+      assertThat((boolean) vm2.invoke("Checking ViewCreator",
+          () -> system.getDistributedMember().equals(getView().getCreator())))
+              .isTrue();
     }
   }
 
-
   @Test
   public void testMultipleLocatorsRestartingAtSameTimeWithMissingServers() throws Exception {
-    IgnoredException.addIgnoredException("ForcedDisconnectException");
-    IgnoredException.addIgnoredException("Possible loss of quorum");
-    IgnoredException.addIgnoredException("java.lang.Exception: Message id is");
-
-    VM vm0 = VM.getVM(0);
-    VM vm1 = VM.getVM(1);
-    VM vm2 = VM.getVM(2);
-    VM vm3 = VM.getVM(3);
-    VM vm4 = VM.getVM(4);
-
-    int[] freeTCPPorts = AvailablePortHelper.getRandomAvailableTCPPorts(3);
-    this.port1 = freeTCPPorts[0];
-    this.port2 = freeTCPPorts[1];
-    int port3 = freeTCPPorts[2];
-    DistributedTestUtils.deleteLocatorStateFile(port1, port2, port3);
-    final String host0 = NetworkUtils.getServerHostName();
-    final String locators =
-        host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]," + host0 + "[" + port3 + "]";
-
-    final Properties dsProps = getBasicProperties(locators);
+    addIgnoredException("ForcedDisconnectException");
+    addIgnoredException("Possible loss of quorum");
+    addIgnoredException("java.lang.Exception: Message id is");
+
+    String locators =
+        hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]," + hostName + "[" + port3
+            + "]";
+
+    Properties dsProps = getBasicProperties(locators);
     dsProps.setProperty(LOG_LEVEL, logger.getLevel().name());
     dsProps.setProperty(DISABLE_AUTO_RECONNECT, "true");
     dsProps.setProperty(MEMBER_TIMEOUT, "2000");
-
     addDSProps(dsProps);
+
     startLocator(vm0, dsProps, port1);
     startLocator(vm1, dsProps, port2);
     startLocator(vm2, dsProps, port3);
 
-    try {
-      vm3.invoke(() -> {
-        getConnectedDistributedSystem(dsProps);
-        return true;
-      });
-      vm4.invoke(() -> {
-        getConnectedDistributedSystem(dsProps);
+    vm3.invoke(() -> {
+      getConnectedDistributedSystem(dsProps);
+    });
+    vm4.invoke(() -> {
+      getConnectedDistributedSystem(dsProps);
 
-        await()
-            .until(() -> system.getDM().getViewMembers()
-                .size() == 5);
-        return true;
-      });
+      await().until(() -> system.getDM().getViewMembers().size() == 5);
+    });
 
-      vm0.invoke(this::forceDisconnect);
-      vm1.invoke(this::forceDisconnect);
-      vm2.invoke(this::forceDisconnect);
+    vm0.invoke(this::forceDisconnect);
+    vm1.invoke(this::forceDisconnect);
+    vm2.invoke(this::forceDisconnect);
 
-      SerializableRunnable waitForDisconnect = new SerializableRunnable("waitForDisconnect") {
-        @Override
-        public void run() {
-          await()
-              .until(() -> system == null);
-        }
-      };
-      vm0.invoke(() -> waitForDisconnect);
-      vm1.invoke(() -> waitForDisconnect);
-      vm2.invoke(() -> waitForDisconnect);
+    SerializableRunnable waitForDisconnect = new SerializableRunnable("waitForDisconnect") {
+      @Override
+      public void run() {
+        await()
+            .until(() -> system == null);
+      }
+    };
+    vm0.invoke(waitForDisconnect);
+    vm1.invoke(waitForDisconnect);
+    vm2.invoke(waitForDisconnect);
+
+    String newLocators = hostName + "[" + port2 + "]," + hostName + "[" + port3 + "]";
+    dsProps.setProperty(LOCATORS, newLocators);
+
+    getBlackboard().initBlackboard();
+    AsyncInvocation async1 = vm1.invokeAsync(() -> {
+      getBlackboard().signalGate("vm1ready");
+      getBlackboard().waitForGate("readyToConnect", 30, SECONDS);
+      System.out.println("vm1 is ready to connect");
+      startLocatorBase(dsProps, port2);
+    });
+    AsyncInvocation async2 = vm2.invokeAsync(() -> {
+      getBlackboard().signalGate("vm2ready");
+      getBlackboard().waitForGate("readyToConnect", 30, SECONDS);
+      System.out.println("vm2 is ready to connect");
+      startLocatorBase(dsProps, port3);
+    });
+    getBlackboard().waitForGate("vm1ready", 30, SECONDS);
+    getBlackboard().waitForGate("vm2ready", 30, SECONDS);
+    getBlackboard().signalGate("readyToConnect");
 
-      final String newLocators = host0 + "[" + port2 + "]," + host0 + "[" + port3 + "]";
-      dsProps.setProperty(LOCATORS, newLocators);
+    async1.await();
+    async2.await();
 
-      getBlackboard().initBlackboard();
-      AsyncInvocation async1 = vm1.invokeAsync(() -> {
-        getBlackboard().signalGate("vm1ready");
-        getBlackboard().waitForGate("readyToConnect", 30, SECONDS);
-        System.out.println("vm1 is ready to connect");
-        startLocatorBase(dsProps, port2);
-      });
-      AsyncInvocation async2 = vm2.invokeAsync(() -> {
-        getBlackboard().signalGate("vm2ready");
-        getBlackboard().waitForGate("readyToConnect", 30, SECONDS);
-        System.out.println("vm2 is ready to connect");
-        startLocatorBase(dsProps, port3);
-      });
-      getBlackboard().waitForGate("vm1ready", 30, SECONDS);
-      getBlackboard().waitForGate("vm2ready", 30, SECONDS);
-      getBlackboard().signalGate("readyToConnect");
-      async1.join();
-      async2.join();
-
-      vm1.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-      vm2.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
-
-      if (vm1.invoke(() -> system.getDistributedMember().equals(getView().getCreator()))) {
-        assertFalse(
-            vm2.invoke("Checking ViewCreator",
-                () -> system.getDistributedMember().equals(getView().getCreator())));
-      } else {
-        assertTrue(
-            vm2.invoke("Checking ViewCreator",
-                () -> system.getDistributedMember().equals(getView().getCreator())));
-      }
+    vm1.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
+    vm2.invoke("waitUntilLocatorBecomesCoordinator", this::waitUntilLocatorBecomesCoordinator);
 
-    } finally {
-      vm2.invoke(LocatorDUnitTest::stopLocator);
-      vm1.invoke(LocatorDUnitTest::stopLocator);
+    if (vm1.invoke(() -> system.getDistributedMember().equals(getView().getCreator()))) {
+      assertFalse(
+          vm2.invoke("Checking ViewCreator",
+              () -> system.getDistributedMember().equals(getView().getCreator())));
+    } else {
+      assertTrue(
+          vm2.invoke("Checking ViewCreator",
+              () -> system.getDistributedMember().equals(getView().getCreator())));
     }
   }
 
-
   /**
    * Tests that a VM can connect to a locator that is hosted in its own VM.
    */
   @Test
   public void testConnectToOwnLocator() throws Exception {
-    port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-
-    final String locators = NetworkUtils.getServerHostName() + "[" + port1 + "]";
+    String locators = hostName + "[" + port1 + "]";
 
     Properties props = getBasicProperties(locators);
     props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
-    Locator locator = Locator.startLocatorAndDS(port1, new File(""), props);
+
+    Locator locator = Locator.startLocatorAndDS(port1, null, props);
     system = (InternalDistributedSystem) locator.getDistributedSystem();
     system.disconnect();
     locator.stop();
-
   }
 
   /**
@@ -1636,35 +1331,19 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testHostingMultipleLocators() throws Exception {
-    int[] randomAvailableTCPPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    port1 = randomAvailableTCPPorts[0];
-    File logFile1 = new File("");
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    Locator locator1 = Locator.startLocator(port1, logFile1);
+    Locator.startLocator(port1, null);
 
     try {
+      Locator.startLocator(port2, null);
+      fail("expected second locator start to fail.");
+    } catch (IllegalStateException expected) {
+    }
 
-      int port2 = randomAvailableTCPPorts[1];
-      DistributedTestUtils.deleteLocatorStateFile(port2);
-
-      try {
-        Locator.startLocator(port2, new File(""));
-        fail("expected second locator start to fail.");
-      } catch (IllegalStateException expected) {
-      }
-
-      final String host0 = NetworkUtils.getServerHostName();
-      final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
-
-      Properties props = getBasicProperties(locators);
-      props.setProperty(LOG_LEVEL, logger.getLevel().name());
-      getConnectedDistributedSystem(props);
+    String locators = hostName + "[" + port1 + "]," + hostName + "[" + port2 + "]";
 
-      disconnectDistributedSystem();
+    Properties props = getBasicProperties(locators);
 
-    } finally {
-      locator1.stop();
-    }
+    getConnectedDistributedSystem(props);
   }
 
   /**
@@ -1674,136 +1353,79 @@ public class LocatorDUnitTest implements java.io.Serializable {
    */
   @Test
   public void testRestartLocator() throws Exception {
-    port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    File logFile = new File("");
     File stateFile = new File("locator" + port1 + "state.dat");
-    VM vm = VM.getVM(0);
-    final Properties properties =
-        getBasicProperties(Host.getHost(0).getHostName() + "[" + port1 + "]");
+
+    Properties properties = getBasicProperties(getHostName() + "[" + port1 + "]");
     properties.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
     properties.setProperty(LOG_LEVEL, DUnitLauncher.logLevel);
     addDSProps(properties);
+
     if (stateFile.exists()) {
       assertThat(stateFile.delete()).isTrue();
-
     }
 
     logger.info("Starting locator");
-    Locator locator = Locator.startLocatorAndDS(port1, logFile, properties);
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
     system = (InternalDistributedSystem) locator.getDistributedSystem();
-    try {
 
-      vm.invoke(() -> {
-        getConnectedDistributedSystem(properties);
-        return true;
-      });
-
-      logger.info("Stopping locator");
-      locator.stop();
-
-      logger.info("Starting locator");
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
+    vm0.invoke(() -> {
+      getConnectedDistributedSystem(properties);
+    });
 
-      vm.invoke("disconnect", () -> {
-        getConnectedDistributedSystem(properties).disconnect();
-        return null;
-      });
+    logger.info("Stopping locator");
+    locator.stop();
 
-    } finally {
-      locator.stop();
-    }
+    logger.info("Starting locator");
+    locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
 
+    vm0.invoke("disconnect", () -> {
+      getConnectedDistributedSystem(properties).disconnect();
+    });
   }
 
   /**
-   * See GEODE-3588 - a locator is restarted twice with a server and ends up in a split-brain
+   * A locator is restarted twice with a server and ends up in a split-brain.
    */
   @Test
   public void testRestartLocatorMultipleTimes() throws Exception {
-    port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    File logFile = new File("");
     File stateFile = new File("locator" + port1 + "state.dat");
-    VM vm = VM.getVM(0);
-    final Properties properties =
-        getBasicProperties(Host.getHost(0).getHostName() + "[" + port1 + "]");
+
+    Properties properties = getBasicProperties(getHostName() + "[" + port1 + "]");
     addDSProps(properties);
+
     if (stateFile.exists()) {
       assertThat(stateFile.delete()).isTrue();
     }
 
-    Locator locator = Locator.startLocatorAndDS(port1, logFile, properties);
+    Locator locator = Locator.startLocatorAndDS(port1, null, properties);
     system = (InternalDistributedSystem) locator.getDistributedSystem();
 
-    vm.invoke(() -> {
+    vm0.invoke(() -> {
       getConnectedDistributedSystem(properties);
-      return null;
     });
 
-    try {
-      locator.stop();
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      assertEquals(2, ((InternalDistributedSystem) locator.getDistributedSystem()).getDM()
-          .getViewMembers().size());
-
-      locator.stop();
-      locator = Locator.startLocatorAndDS(port1, logFile, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      assertEquals(2, ((InternalDistributedSystem) locator.getDistributedSystem()).getDM()
-          .getViewMembers().size());
-
-    } finally {
-      vm.invoke("disconnect", () -> {
-        getConnectedDistributedSystem(properties).disconnect();
-        return null;
-      });
-      locator.stop();
-    }
-
-  }
-
-  @Before
-  public final void setUp() {
-    port1 = -1;
-    port2 = -1;
-    IgnoredException.addIgnoredException("Removing shunned member");
-  }
+    locator.stop();
+    locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
+    assertEquals(2, ((InternalDistributedSystem) locator.getDistributedSystem()).getDM()
+        .getViewMembers().size());
 
-  @After
-  public final void tearDown() {
-    if (Locator.hasLocator()) {
-      Locator.getLocator().stop();
-    }
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    if (cache != null && !cache.isClosed()) {
-      cache.close();
-    }
-    // delete locator state files so they don't accidentally
-    // get used by other tests
-    if (port1 > 0) {
-      DistributedTestUtils.deleteLocatorStateFile(port1);
-    }
-    if (port2 > 0) {
-      DistributedTestUtils.deleteLocatorStateFile(port2);
-    }
-    if (system != null) {
-      system.disconnect();
-      system = null;
-    }
+    locator.stop();
+    locator = Locator.startLocatorAndDS(port1, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
+    assertEquals(2, ((InternalDistributedSystem) locator.getDistributedSystem()).getDM()
+        .getViewMembers().size());
   }
 
-  // for child classes
   protected void addDSProps(Properties p) {
-    p.put(ENABLE_CLUSTER_CONFIGURATION, "false");
-    p.put(USE_CLUSTER_CONFIGURATION, "false");
+    p.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
+    p.setProperty(USE_CLUSTER_CONFIGURATION, "false");
   }
 
-  protected static InternalDistributedSystem getConnectedDistributedSystem(Properties properties) {
+  static InternalDistributedSystem getConnectedDistributedSystem(Properties properties) {
     if (system == null || !system.isConnected()) {
-      properties.put(NAME, "vm" + VM.getCurrentVMNum());
+      properties.setProperty(NAME, "vm" + VM.getCurrentVMNum());
       system = (InternalDistributedSystem) DistributedSystem.connect(properties);
     }
     return system;
@@ -1811,7 +1433,7 @@ public class LocatorDUnitTest implements java.io.Serializable {
 
   private void startLocatorWithPortAndProperties(final int port, final Properties properties)
       throws IOException {
-    Locator locator = Locator.startLocatorAndDS(port, new File(""), properties);
+    Locator locator = Locator.startLocatorAndDS(port, null, properties);
     system = (InternalDistributedSystem) locator.getDistributedSystem();
     assertThat(locator).isNotNull();
   }
@@ -1871,7 +1493,7 @@ public class LocatorDUnitTest implements java.io.Serializable {
       if (member != null) {
         return member.equals(lead);
       }
-      return (lead == null);
+      return lead == null;
     });
   }
 
@@ -1885,21 +1507,17 @@ public class LocatorDUnitTest implements java.io.Serializable {
     });
   }
 
-  private Locator startLocatorBase(Properties properties, int port) {
-    File lf = new File("");
-    try {
-      properties.put(NAME, "vm" + VM.getCurrentVMNum());
-      Locator locator = Locator.startLocatorAndDS(port, lf, properties);
-      system = (InternalDistributedSystem) locator.getDistributedSystem();
-      return locator;
-    } catch (IOException ios) {
-      throw new RuntimeException("Unable to start locator", ios);
-    }
+  private Locator startLocatorBase(Properties properties, int port) throws IOException {
+    properties.setProperty(NAME, "vm" + VM.getCurrentVMNum());
+
+    Locator locator = Locator.startLocatorAndDS(port, null, properties);
+    system = (InternalDistributedSystem) locator.getDistributedSystem();
+
+    return locator;
   }
 
   void startLocatorWithSomeBasicProperties(VM vm, int port) {
     Properties locProps = new Properties();
-    locProps.setProperty(MCAST_PORT, "0");
     locProps.setProperty(MEMBER_TIMEOUT, "1000");
     addDSProps(locProps);
 
@@ -1909,14 +1527,15 @@ public class LocatorDUnitTest implements java.io.Serializable {
   private void startLocatorPreferredCoordinators(VM vm0, int port) {
     try {
       System.setProperty(InternalLocator.LOCATORS_PREFERRED_AS_COORDINATORS, "true");
+
       Properties locProps1 = new Properties();
-      locProps1.put(MCAST_PORT, "0");
-      locProps1.put(LOG_LEVEL, logger.getLevel().name());
+      locProps1.setProperty(MCAST_PORT, "0");
+      locProps1.setProperty(LOG_LEVEL, logger.getLevel().name());
       addDSProps(locProps1);
 
       startLocator(vm0, locProps1, port);
     } finally {
-      System.getProperties().remove(InternalLocator.LOCATORS_PREFERRED_AS_COORDINATORS);
+      System.clearProperty(InternalLocator.LOCATORS_PREFERRED_AS_COORDINATORS);
     }
   }
 
@@ -1934,11 +1553,6 @@ public class LocatorDUnitTest implements java.io.Serializable {
       if (sys != null && sys.isConnected()) {
         sys.disconnect();
       }
-      // connectExceptions occur during disconnect, so we need the
-      // ExpectedException hint to be in effect until this point
-      LogWriter bLogger = new LocalLogWriter(ALL.intLevel(), System.out);
-      bLogger
-          .info("<ExpectedException action=remove>java.net.ConnectException</ExpectedException>");
     });
   }
 
@@ -1948,25 +1562,22 @@ public class LocatorDUnitTest implements java.io.Serializable {
 
   Properties getBasicProperties(String locators) {
     Properties props = new Properties();
-    props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, locators);
     return props;
   }
 
   private Properties getClusterProperties(String locators, String s) {
-    final Properties properties = getBasicProperties(locators);
-    properties.put(ENABLE_NETWORK_PARTITION_DETECTION, s);
-    properties.put(DISABLE_AUTO_RECONNECT, "true");
-    properties.put(MEMBER_TIMEOUT, "2000");
-    properties.put(LOG_LEVEL, logger.getLevel().name());
-    properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
-    properties.put(USE_CLUSTER_CONFIGURATION, "false");
-    properties.put(LOCATOR_WAIT_TIME, "10"); // seconds
+    Properties properties = getBasicProperties(locators);
+    properties.setProperty(DISABLE_AUTO_RECONNECT, "true");
+    properties.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
+    properties.setProperty(ENABLE_NETWORK_PARTITION_DETECTION, s);
+    properties.setProperty(LOCATOR_WAIT_TIME, "10"); // seconds
+    properties.setProperty(MEMBER_TIMEOUT, "2000");
+    properties.setProperty(USE_CLUSTER_CONFIGURATION, "false");
     return properties;
   }
 
   private void waitUntilTheSystemIsConnected(VM vm2, VM locatorVM) {
-
     await().until(() -> {
       assertThat(isSystemConnected())
           .describedAs("Distributed system should not have disconnected")
@@ -1977,9 +1588,10 @@ public class LocatorDUnitTest implements java.io.Serializable {
     });
   }
 
-
-  // New test hook which blocks before closing channel.
-  static class TestHook implements MembershipTestHook {
+  /**
+   * New test hook which blocks before closing channel.
+   */
+  private class TestHook implements MembershipTestHook {
 
     volatile boolean unboundedWait = true;
 
@@ -1992,31 +1604,31 @@ public class LocatorDUnitTest implements java.io.Serializable {
           Wait.pause(1000);
         }
       } else {
-        cause.printStackTrace();
+        errorCollector.addError(cause);
       }
     }
 
-    @Override
-    public void afterMembershipFailure(String reason, Throwable cause) {}
-
     void reset() {
       unboundedWait = false;
     }
-
   }
 
-  static class MyMembershipListener implements MembershipListener {
+  private static class MyMembershipListener implements MembershipListener {
 
-    boolean quorumLostInvoked;
+    volatile boolean quorumLostInvoked;
     final List<String> suspectReasons = new ArrayList<>(50);
 
     @Override
     public void memberJoined(DistributionManager distributionManager,
-        InternalDistributedMember id) {}
+        InternalDistributedMember id) {
+      // nothing
+    }
 
     @Override
     public void memberDeparted(DistributionManager distributionManager,
-        InternalDistributedMember id, boolean crashed) {}
+        InternalDistributedMember id, boolean crashed) {
+      // nothing
+    }
 
     @Override
     public void memberSuspect(DistributionManager distributionManager, InternalDistributedMember id,
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorUDPSecurityDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorUDPSecurityDUnitTest.java
index ba584be..ab778c2 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorUDPSecurityDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/distributed/LocatorUDPSecurityDUnitTest.java
@@ -25,22 +25,12 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.GemFireConfigException;
-import org.apache.geode.internal.AvailablePort;
-import org.apache.geode.test.dunit.DistributedTestUtils;
-import org.apache.geode.test.dunit.NetworkUtils;
-import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.junit.categories.MembershipTest;
 
-@Category({MembershipTest.class})
+@Category(MembershipTest.class)
 public class LocatorUDPSecurityDUnitTest extends LocatorDUnitTest {
 
   @Override
-  @Test
-  public void testMultipleLocatorsRestartingAtSameTimeWithMissingServers() throws Exception {
-    super.testMultipleLocatorsRestartingAtSameTimeWithMissingServers();
-  }
-
-  @Override
   protected void addDSProps(Properties p) {
     super.addDSProps(p);
     p.setProperty(SECURITY_UDP_DHALGO, "AES:128");
@@ -48,23 +38,17 @@ public class LocatorUDPSecurityDUnitTest extends LocatorDUnitTest {
 
   @Test
   public void testLocatorWithUDPSecurityButServer() {
-    VM vm = VM.getVM(0);
-
-    final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    DistributedTestUtils.deleteLocatorStateFile(port1);
-    final String locators = NetworkUtils.getServerHostName() + "[" + port + "]";
+    String locators = hostName + "[" + port1 + "]";
 
-    startLocatorWithSomeBasicProperties(vm, port);
+    startLocatorWithSomeBasicProperties(vm0, port1);
 
     try {
       Properties props = getBasicProperties(locators);
       props.setProperty(MEMBER_TIMEOUT, "1000");
       system = getConnectedDistributedSystem(props);
       fail("Should not have reached this line, it should have caught the exception.");
-    } catch (GemFireConfigException gce) {
-      assertThat(gce.getMessage()).contains("Rejecting findCoordinatorRequest");
-    } finally {
-      vm.invoke(LocatorDUnitTest::stopLocator);
+    } catch (GemFireConfigException e) {
+      assertThat(e.getMessage()).contains("Rejecting findCoordinatorRequest");
     }
   }
 }
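
The failure asserted in testLocatorWithUDPSecurityButServer above comes from a member that tries to join without the locator's UDP security setting. A minimal sketch of the successful case, assuming a matching SECURITY_UDP_DHALGO value; the locator endpoint shown is illustrative, not taken from the test:

    import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
    import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_UDP_DHALGO;

    import java.util.Properties;

    import org.apache.geode.distributed.DistributedSystem;

    public class UdpSecureMemberExample {
      public static void main(String[] args) {
        Properties props = new Properties();
        // hypothetical locator address; point this at the locator actually started for the cluster
        props.setProperty(LOCATORS, "localhost[10334]");
        // must match the locator's setting, otherwise the locator rejects the member's
        // findCoordinatorRequest (the GemFireConfigException asserted in the test above)
        props.setProperty(SECURITY_UDP_DHALGO, "AES:128");
        DistributedSystem system = DistributedSystem.connect(props);
        system.disconnect();
      }
    }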
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionCacheXMLExampleDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionCacheXMLExampleDUnitTest.java
index aed3a0d..c705634 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionCacheXMLExampleDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionCacheXMLExampleDUnitTest.java
@@ -16,7 +16,7 @@ package org.apache.geode.internal.cache;
 
 import static org.apache.geode.cache.Region.SEPARATOR;
 import static org.apache.geode.distributed.ConfigurationProperties.CACHE_XML_FILE;
-import static org.apache.geode.test.dunit.Host.getHost;
+import static org.apache.geode.test.dunit.VM.getVM;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.assertj.core.api.Assertions.assertThat;
 
@@ -33,7 +33,6 @@ import org.apache.geode.test.dunit.cache.CacheTestCase;
 /**
  * This class tests regions created by xml files
  */
-
 public class PartitionedRegionCacheXMLExampleDUnitTest extends CacheTestCase {
 
   private static final String CACHE_XML_FILE_1 = "PartitionRegionCacheExample1.xml";
@@ -44,14 +43,15 @@ public class PartitionedRegionCacheXMLExampleDUnitTest extends CacheTestCase {
   private static final String PARTITIONED_SUBREGION_NAME =
       SEPARATOR + "root" + SEPARATOR + "PartitionedSubRegion";
 
+  private String cacheXmlFileName;
+
   private VM vm0;
   private VM vm1;
-  private String cacheXmlFileName;
 
   @Before
   public void setUp() throws Exception {
-    vm0 = getHost(0).getVM(0);
-    vm1 = getHost(0).getVM(1);
+    vm0 = getVM(0);
+    vm1 = getVM(1);
   }
 
   @After
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/StartServerWithXmlDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/StartServerWithXmlDUnitTest.java
index ff5a6d8..9d41094 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/StartServerWithXmlDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/StartServerWithXmlDUnitTest.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.internal.cache;
 
 import static org.apache.geode.distributed.ConfigurationProperties.CACHE_XML_FILE;
@@ -22,6 +21,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 
 import java.util.Properties;
 
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -35,6 +35,8 @@ import org.apache.geode.test.dunit.rules.MemberVM;
 
 public class StartServerWithXmlDUnitTest {
 
+  private static Cache cache;
+
   private VM server;
   private MemberVM locator;
 
@@ -45,28 +47,36 @@ public class StartServerWithXmlDUnitTest {
   public void before() throws Exception {
     locator = cluster.startLocatorVM(0);
 
-    Properties props = new Properties();
     String locators = "localhost[" + locator.getPort() + "]";
-    props.setProperty(LOCATORS, locators);
     String cacheXmlPath =
         createTempFileFromResource(getClass(), "CacheServerWithZeroPort.xml")
             .getAbsolutePath();
+
+    Properties props = new Properties();
+    props.setProperty(LOCATORS, locators);
     props.setProperty(CACHE_XML_FILE, cacheXmlPath);
 
     server = cluster.getVM(1);
 
     server.invoke(() -> {
       ServerLauncherParameters.INSTANCE.withBindAddress("localhost");
-      CacheFactory cf = new CacheFactory(props);
-      Cache cache = cf.create();
+      cache = new CacheFactory(props).create();
+    });
+  }
+
+  @After
+  public void tearDown() {
+    server.invoke(() -> {
+      cache.close();
+      cache = null;
     });
   }
 
   @Test
   public void startServerWithXMLNotToStartDefaultCacheServer() {
-    // Verify that when there is a declarative cache server then we dont launch default server
+    // Verify that when a cache server is declared in cache XML, we don't launch a default server
     server.invoke(() -> {
-      assertThat(GemFireCacheImpl.getInstance().getCacheServers().size()).isEqualTo(1);
+      assertThat(cache.getCacheServers().size()).isEqualTo(1);
     });
   }
 }
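
The static cache field introduced above follows the usual dunit idiom: the lambda passed to server.invoke(...) is serialized and executed in a separate server JVM, so only a static field in that JVM can carry the Cache from before() to tearDown(). A rough sketch of the pattern, with illustrative class and method names:

    import java.util.Properties;

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.test.dunit.VM;

    public class RemoteCacheLifecycleExample {
      // lives in whichever JVM invoke() runs in, not in the test-controller JVM
      private static Cache cache;

      static void createCacheIn(VM vm, Properties props) {
        vm.invoke(() -> {
          cache = new CacheFactory(props).create();
        });
      }

      static void closeCacheIn(VM vm) {
        vm.invoke(() -> {
          cache.close();
          cache = null;
        });
      }
    }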
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/MembershipTestHook.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/MembershipTestHook.java
index b9c45bf..1db7d89 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/MembershipTestHook.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/MembershipTestHook.java
@@ -15,19 +15,21 @@
 package org.apache.geode.distributed.internal.membership;
 
 /**
- * Test hook for hydra test development
- *
- *
+ * Test hook for membership test development
  */
 public interface MembershipTestHook {
+
   /**
    * test hook invoked prior to shutting down distributed system
    */
-  void beforeMembershipFailure(String reason, Throwable cause);
+  default void beforeMembershipFailure(String reason, Throwable cause) {
+    // nothing
+  }
 
   /**
    * test hook invoked after shutting down distributed system
    */
-  void afterMembershipFailure(String reason, Throwable cause);
-
+  default void afterMembershipFailure(String reason, Throwable cause) {
+    // nothing
+  }
 }