Posted to commits@geode.apache.org by kl...@apache.org on 2017/04/25 20:42:50 UTC

[1/6] geode git commit: Safe refactorings [Forced Update!]

Repository: geode
Updated Branches:
  refs/heads/feature/GEODE-2632-6-1 8ac3c0185 -> b605f5d3d (forced update)


http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/TXWriterTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/TXWriterTestCase.java b/geode-core/src/test/java/org/apache/geode/TXWriterTestCase.java
index 987f22f..a75e167 100644
--- a/geode-core/src/test/java/org/apache/geode/TXWriterTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/TXWriterTestCase.java
@@ -14,30 +14,43 @@
  */
 package org.apache.geode;
 
-import org.apache.geode.cache.*;
-import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.util.Properties;
+
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 
-import java.util.Properties;
-
-import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.AttributesMutator;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.CacheTransactionManager;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.cache.TransactionEvent;
+import org.apache.geode.cache.TransactionListener;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
 
 /**
  * Extracted from TXWriterJUnitTest to share with TXWriterOOMEJUnitTest.
- * 
  */
 @SuppressWarnings("deprecation")
 public class TXWriterTestCase {
 
-  protected int cbCount;
-  protected int failedCommits = 0;
-  protected int afterCommits = 0;
-  protected int afterRollbacks = 0;
+  int cbCount;
+  int failedCommits = 0;
+  int afterCommits = 0;
+  int afterRollbacks = 0;
 
   protected GemFireCacheImpl cache;
   protected CacheTransactionManager txMgr;
@@ -46,10 +59,13 @@ public class TXWriterTestCase {
   protected void createCache() throws CacheException {
     Properties p = new Properties();
     p.setProperty(MCAST_PORT, "0"); // loner
+
     this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
-    AttributesFactory<?, ?> af = new AttributesFactory<String, String>();
+
+    AttributesFactory<String, String> af = new AttributesFactory<>();
     af.setScope(Scope.DISTRIBUTED_NO_ACK);
     af.setIndexMaintenanceSynchronous(true);
+
     this.region = this.cache.createRegion("TXTest", af.create());
     this.txMgr = this.cache.getCacheTransactionManager();
   }
@@ -95,79 +111,96 @@ public class TXWriterTestCase {
     }
   }
 
-  protected void installCacheListenerAndWriter() {
+  void installCacheListenerAndWriter() {
     AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
     mutator.setCacheListener(new CacheListenerAdapter<String, String>() {
+      @Override
       public void close() {
         cbCount++;
       }
 
+      @Override
       public void afterCreate(EntryEvent<String, String> event) {
         cbCount++;
       }
 
+      @Override
       public void afterUpdate(EntryEvent<String, String> event) {
         cbCount++;
       }
 
+      @Override
       public void afterInvalidate(EntryEvent<String, String> event) {
         cbCount++;
       }
 
+      @Override
       public void afterDestroy(EntryEvent<String, String> event) {
         cbCount++;
       }
 
+      @Override
       public void afterRegionInvalidate(RegionEvent<String, String> event) {
         cbCount++;
       }
 
+      @Override
       public void afterRegionDestroy(RegionEvent<String, String> event) {
         cbCount++;
       }
     });
     mutator.setCacheWriter(new CacheWriter<String, String>() {
+      @Override
       public void close() {
         cbCount++;
       }
 
+      @Override
       public void beforeUpdate(EntryEvent<String, String> event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeCreate(EntryEvent<String, String> event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeDestroy(EntryEvent<String, String> event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeRegionDestroy(RegionEvent<String, String> event)
           throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeRegionClear(RegionEvent<String, String> event) throws CacheWriterException {
         cbCount++;
       }
     });
   }
 
-  protected void installTransactionListener() {
-    ((CacheTransactionManager) this.txMgr).setListener(new TransactionListener() {
+  void installTransactionListener() {
+    this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         failedCommits++;
       }
 
+      @Override
       public void afterCommit(TransactionEvent event) {
         afterCommits++;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         afterRollbacks++;
       }
 
+      @Override
       public void close() {}
     });
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
index 903b212..15cfaa7 100755
--- a/geode-core/src/test/java/org/apache/geode/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
@@ -946,8 +946,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
     server.invoke(new SerializableCallable() {
       public Object call() throws Exception {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-        cache.TEST_MAX_QUERY_EXECUTION_TIME_OVERRIDE_EXCEPTION = false;
-        cache.TEST_MAX_QUERY_EXECUTION_TIME = -1;
+        cache.testMaxQueryExecutionTime = -1;
         return null;
       }
     });
@@ -972,11 +971,9 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
 
         if (queryTimeout != -1) {
-          cache.TEST_MAX_QUERY_EXECUTION_TIME_OVERRIDE_EXCEPTION = true;
-          cache.TEST_MAX_QUERY_EXECUTION_TIME = queryTimeout;
+          cache.testMaxQueryExecutionTime = queryTimeout;
         } else {
-          cache.TEST_MAX_QUERY_EXECUTION_TIME_OVERRIDE_EXCEPTION = false;
-          cache.TEST_MAX_QUERY_EXECUTION_TIME = -1;
+          cache.testMaxQueryExecutionTime = -1;
         }
 
         if (criticalThreshold != 0) {
@@ -1074,7 +1071,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
   private boolean isExceptionDueToTimeout(QueryException e, long queryTimeout) {
     String message = e.getMessage();
     // -1 needs to be matched due to client/server set up, BaseCommand uses the
-    // MAX_QUERY_EXECUTION_TIME and not the TEST_MAX_QUERY_EXECUTION_TIME
+    // MAX_QUERY_EXECUTION_TIME and not the testMaxQueryExecutionTime
     return (message.contains("The QueryMonitor thread may be sleeping longer than")
         || message.contains(LocalizedStrings.QueryMonitor_LONG_RUNNING_QUERY_CANCELED
             .toLocalizedString(queryTimeout))

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/cache/query/internal/index/NewDeclarativeIndexCreationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/internal/index/NewDeclarativeIndexCreationJUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/internal/index/NewDeclarativeIndexCreationJUnitTest.java
index 8a0f31c..e7f5c08 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/query/internal/index/NewDeclarativeIndexCreationJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/internal/index/NewDeclarativeIndexCreationJUnitTest.java
@@ -12,161 +12,154 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-/**
- * 
- */
 package org.apache.geode.cache.query.internal.index;
 
-import org.apache.geode.cache.*;
+import static org.apache.geode.distributed.ConfigurationProperties.CACHE_XML_FILE;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.geode.InternalGemFireException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.CacheXmlException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.apache.geode.util.test.TestUtil;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
 
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.Collection;
+import java.io.File;
 import java.util.Properties;
 
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
 /**
- *
  * @since GemFire 6.6.1
  */
 @Category(IntegrationTest.class)
 public class NewDeclarativeIndexCreationJUnitTest {
 
-  private Cache cache = null;
+  private static final String CACHE_XML_FILE_NAME = "cachequeryindex.xml";
+
+  private Cache cache;
+  private File cacheXmlFile;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
 
   @Before
   public void setUp() throws Exception {
-    // Read the Cache.xml placed in test.lib folder
+    this.cacheXmlFile = this.temporaryFolder.newFile(CACHE_XML_FILE_NAME);
+    FileUtils.copyURLToFile(getClass().getResource(CACHE_XML_FILE_NAME), this.cacheXmlFile);
+    assertThat(this.cacheXmlFile).exists(); // precondition
+
     Properties props = new Properties();
-    props.setProperty(CACHE_XML_FILE, TestUtil.getResourcePath(getClass(), "cachequeryindex.xml"));
+    props.setProperty(CACHE_XML_FILE, this.cacheXmlFile.getAbsolutePath());
     props.setProperty(MCAST_PORT, "0");
     DistributedSystem ds = DistributedSystem.connect(props);
-    cache = CacheFactory.create(ds);
+    this.cache = CacheFactory.create(ds);
   }
 
   @After
   public void tearDown() throws Exception {
-    if (!cache.isClosed())
-      cache.close();
+    if (this.cache != null) {
+      this.cache.close();
+    }
   }
 
   @Test
   public void testAsynchronousIndexCreatedOnRoot_PortfoliosRegion() {
-    Region root = cache.getRegion("/root/portfolios");
+    Region root = this.cache.getRegion("/root/portfolios");
     IndexManager im = IndexUtils.getIndexManager(root, true);
-    Collection coll = im.getIndexes();
-    if (coll.size() > 0) {
-      Assert.assertTrue(true);
-      // System.out.println("List of indexes= " + im.toString());
-      RegionAttributes ra = root.getAttributes();
-      Assert.assertTrue(!ra.getIndexMaintenanceSynchronous());
-    } else
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testAsynchronousIndexCreatedOnRoot_PortfoliosRegion:No index found in the root region");
+    assertThat(im.getIndexes()).isNotEmpty();
+
+    RegionAttributes ra = root.getAttributes();
+    assertThat(ra.getIndexMaintenanceSynchronous()).isFalse();
   }
 
   @Test
   public void testSynchronousIndexCreatedOnRoot_StringRegion() {
-    Region root = cache.getRegion("/root/string");
-    IndexManager im = IndexUtils.getIndexManager(root, true);;
-    Collection coll = im.getIndexes();
-    if (coll.size() > 0) {
-      Assert.assertTrue(true);
-      // System.out.println("List of indexes= " + im.toString());
-      RegionAttributes ra = root.getAttributes();
-      Assert.assertTrue(ra.getIndexMaintenanceSynchronous());
-    } else
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testSynchronousIndexCreatedOnRoot_StringRegion Region:No index found in the root region");
-    root = cache.getRegion("/root/string1");
+    Region root = this.cache.getRegion("/root/string");
+    IndexManager im = IndexUtils.getIndexManager(root, true);
+    assertThat(im.getIndexes()).isNotEmpty();
+
+    RegionAttributes ra = root.getAttributes();
+    assertThat(ra.getIndexMaintenanceSynchronous()).isTrue();
+
+    root = this.cache.getRegion("/root/string1");
     im = IndexUtils.getIndexManager(root, true);
-    if (!im.isIndexMaintenanceTypeSynchronous())
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testSynchronousIndexCreatedOnRoot_StringRegion: The index update type not synchronous if no index-update-type attribuet specified in cache.cml");
+    assertThat(im.isIndexMaintenanceTypeSynchronous()).isTrue();
   }
 
   @Test
   public void testSynchronousIndexCreatedOnRootRegion() {
-    Region root = cache.getRegion("/root");
+    Region root = this.cache.getRegion("/root");
     IndexManager im = IndexUtils.getIndexManager(root, true);
-    Collection coll = im.getIndexes();
-    if (coll.size() > 0) {
-      Assert.assertTrue(true);
-      // System.out.println("List of indexes= " + im.toString());
-      RegionAttributes ra = root.getAttributes();
-      Assert.assertTrue(ra.getIndexMaintenanceSynchronous());
-    } else
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testAsynchronousIndexCreatedOnRoot_PortfoliosRegion:No index found in the root region");
+    assertThat(im.getIndexes()).isNotEmpty();
+
+    RegionAttributes ra = root.getAttributes();
+    assertThat(ra.getIndexMaintenanceSynchronous()).isTrue();
   }
 
 
-  // Index creation tests for new DTD changes for Index tag for 6.6.1 with no function/primary-key
-  // tag
+  /**
+   * Index creation tests for new DTD changes for Index tag for 6.6.1 with no function/primary-key
+   * tag
+   */
   @Test
   public void testAsynchronousIndexCreatedOnPortfoliosRegionWithNewDTD() {
-    Region root = cache.getRegion("/root/portfolios2");
+    Region root = this.cache.getRegion("/root/portfolios2");
     IndexManager im = IndexUtils.getIndexManager(root, true);
-    Collection coll = im.getIndexes();
-    if (coll.size() > 0) {
-      Assert.assertTrue(true);
-      // System.out.println("List of indexes= " + im.toString());
-      RegionAttributes ra = root.getAttributes();
-      Assert.assertTrue(!ra.getIndexMaintenanceSynchronous());
-    } else
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testAsynchronousIndexCreatedOnRoot_PortfoliosRegion:No index found in the root region");
+    assertThat(im.getIndexes()).isNotEmpty();
+
+    RegionAttributes ra = root.getAttributes();
+    assertThat(ra.getIndexMaintenanceSynchronous()).isFalse();
   }
 
   @Test
   public void testSynchronousIndexCreatedOnStringRegionWithNewDTD() {
-    Region root = cache.getRegion("/root/string2");
+    Region root = this.cache.getRegion("/root/string2");
     IndexManager im = IndexUtils.getIndexManager(root, true);;
-    Collection coll = im.getIndexes();
-    if (coll.size() > 0) {
-      Assert.assertTrue(true);
-      // System.out.println("List of indexes= " + im.toString());
-      RegionAttributes ra = root.getAttributes();
-      Assert.assertTrue(ra.getIndexMaintenanceSynchronous());
-    } else
-      Assert.fail(
-          "NewDeclarativeIndexCreationJUnitTest::testSynchronousIndexCreatedOnRoot_StringRegion Region:No index found in the root region");
-    root = cache.getRegion("/root/string1");
+    assertThat(im.getIndexes()).isNotEmpty();
+
+    RegionAttributes ra = root.getAttributes();
+    assertThat(ra.getIndexMaintenanceSynchronous()).isTrue();
+
+    root = this.cache.getRegion("/root/string1");
     im = IndexUtils.getIndexManager(root, true);
-    if (!im.isIndexMaintenanceTypeSynchronous())
-      Assert.fail(
-          "DeclarativeIndexCreationTest::testSynchronousIndexCreatedOnRoot_StringRegion: The index update type not synchronous if no index-update-type attribuet specified in cache.cml");
+    assertThat(im.isIndexMaintenanceTypeSynchronous()).isTrue();
   }
 
+  /**
+   * TODO: move this to a different test class because it requires different setup
+   */
   @Test
-  public void testIndexCreationExceptionOnRegionWithNewDTD()
-      throws IOException, URISyntaxException {
-    if (cache != null && !cache.isClosed())
-      cache.close();
+  public void testIndexCreationExceptionOnRegionWithNewDTD() throws Exception {
+    if (this.cache != null && !this.cache.isClosed()) {
+      this.cache.close();
+    }
+
+    this.cacheXmlFile = this.temporaryFolder.newFile("cachequeryindexwitherror.xml");
+    FileUtils.copyURLToFile(getClass().getResource("cachequeryindexwitherror.xml"),
+        this.cacheXmlFile);
+    assertThat(this.cacheXmlFile).exists(); // precondition
+
     Properties props = new Properties();
-    props.setProperty(CACHE_XML_FILE,
-        TestUtil.getResourcePath(getClass(), "cachequeryindexwitherror.xml"));
+    props.setProperty(CACHE_XML_FILE, this.cacheXmlFile.getAbsolutePath());
     props.setProperty(MCAST_PORT, "0");
+
     DistributedSystem ds = DistributedSystem.connect(props);
-    try {
-      Cache cache = CacheFactory.create(ds);
-    } catch (CacheXmlException e) {
-      if (!e.getCause().getMessage()
-          .contains("CacheXmlParser::endIndex:Index creation attribute not correctly specified.")) {
-        e.printStackTrace();
-        Assert.fail(
-            "NewDeclarativeIndexCreationJUnitTest::setup: Index creation should have thrown exception for index on /root/portfolios3 region.");
-      }
-      return;
-    }
+
+    // TODO: refactoring GemFireCacheImpl.initializeDeclarativeCache requires change here
+    assertThatThrownBy(() -> CacheFactory.create(ds)).isExactlyInstanceOf(CacheXmlException.class)
+        .hasCauseInstanceOf(InternalGemFireException.class);
+
+    // hasCauseMessageContaining("CacheXmlParser::endIndex:Index creation attribute not correctly
+    // specified.");
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistTXDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistTXDebugDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistTXDebugDUnitTest.java
index 0d2f2b6..e72823e 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistTXDebugDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistTXDebugDUnitTest.java
@@ -46,7 +46,6 @@ import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -60,7 +59,7 @@ public class DistTXDebugDUnitTest extends JUnit4CacheTestCase {
   protected VM accessor = null;
   protected VM dataStore1 = null;
   protected VM dataStore2 = null;
-  protected VM dataStore3 = null;
+  private VM dataStore3 = null;
 
   @Override
   public final void postSetUp() throws Exception {
@@ -106,30 +105,28 @@ public class DistTXDebugDUnitTest extends JUnit4CacheTestCase {
   public static void createPR(String partitionedRegionName, Integer redundancy,
       Integer localMaxMemory, Integer totalNumBuckets, Object colocatedWith,
       Boolean isPartitionResolver, Boolean concurrencyChecks) {
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-
-    paf.setRedundantCopies(redundancy.intValue());
+    PartitionAttributesFactory<String, String> paf = new PartitionAttributesFactory();
+    paf.setRedundantCopies(redundancy);
     if (localMaxMemory != null) {
-      paf.setLocalMaxMemory(localMaxMemory.intValue());
+      paf.setLocalMaxMemory(localMaxMemory);
     }
     if (totalNumBuckets != null) {
-      paf.setTotalNumBuckets(totalNumBuckets.intValue());
+      paf.setTotalNumBuckets(totalNumBuckets);
     }
     if (colocatedWith != null) {
       paf.setColocatedWith((String) colocatedWith);
     }
-    if (isPartitionResolver.booleanValue()) {
+    if (isPartitionResolver) {
       paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
     }
-    PartitionAttributes prAttr = paf.create();
-    AttributesFactory attr = new AttributesFactory();
+    PartitionAttributes<String, String> prAttr = paf.create();
+
+    AttributesFactory<String, String> attr = new AttributesFactory();
     attr.setPartitionAttributes(prAttr);
     attr.setConcurrencyChecksEnabled(concurrencyChecks);
-    // assertNotNull(basicGetCache());
-    // Region pr = basicGetCache().createRegion(partitionedRegionName,
-    // attr.create());
+
     assertNotNull(basicGetCache());
-    Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
+    Region<String, String> pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
     assertNotNull(pr);
     LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
@@ -912,55 +909,54 @@ public class DistTXDebugDUnitTest extends JUnit4CacheTestCase {
   public void testTXRR2_dataNodeAsCoordinator() throws Exception {
     performTXRRtestOps(true);
   }
-}
-
 
-class DummyKeyBasedRoutingResolver implements PartitionResolver, DataSerializable {
-  Integer dummyID;
+  private static class DummyKeyBasedRoutingResolver implements PartitionResolver, DataSerializable {
+    Integer dummyID;
 
-  public DummyKeyBasedRoutingResolver() {}
+    public DummyKeyBasedRoutingResolver() {}
 
-  public DummyKeyBasedRoutingResolver(int id) {
-    this.dummyID = new Integer(id);
-  }
+    public DummyKeyBasedRoutingResolver(int id) {
+      this.dummyID = new Integer(id);
+    }
 
-  public String getName() {
-    // TODO Auto-generated method stub
-    return null;
-  }
+    public String getName() {
+      // TODO Auto-generated method stub
+      return null;
+    }
 
-  public Serializable getRoutingObject(EntryOperation opDetails) {
-    return (Serializable) opDetails.getKey();
-  }
+    public Serializable getRoutingObject(EntryOperation opDetails) {
+      return (Serializable) opDetails.getKey();
+    }
 
-  public void close() {
-    // TODO Auto-generated method stub
-  }
+    public void close() {
+      // TODO Auto-generated method stub
+    }
 
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    this.dummyID = DataSerializer.readInteger(in);
-  }
+    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+      this.dummyID = DataSerializer.readInteger(in);
+    }
 
-  public void toData(DataOutput out) throws IOException {
-    DataSerializer.writeInteger(this.dummyID, out);
-  }
+    public void toData(DataOutput out) throws IOException {
+      DataSerializer.writeInteger(this.dummyID, out);
+    }
 
-  @Override
-  public int hashCode() {
-    int i = this.dummyID.intValue();
-    return i;
-  }
+    @Override
+    public int hashCode() {
+      int i = this.dummyID.intValue();
+      return i;
+    }
 
-  @Override
-  public boolean equals(Object o) {
-    if (this == o)
-      return true;
+    @Override
+    public boolean equals(Object o) {
+      if (this == o)
+        return true;
 
-    if (!(o instanceof DummyKeyBasedRoutingResolver))
-      return false;
+      if (!(o instanceof DummyKeyBasedRoutingResolver))
+        return false;
 
-    DummyKeyBasedRoutingResolver otherDummyID = (DummyKeyBasedRoutingResolver) o;
-    return (otherDummyID.dummyID.equals(dummyID));
+      DummyKeyBasedRoutingResolver otherDummyID = (DummyKeyBasedRoutingResolver) o;
+      return (otherDummyID.dummyID.equals(dummyID));
 
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistTXJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistTXJUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistTXJUnitTest.java
index 8abccc6..754b554 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistTXJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistTXJUnitTest.java
@@ -14,6 +14,15 @@
  */
 package org.apache.geode.disttx;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.TXJUnitTest;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheFactory;
@@ -21,51 +30,33 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.test.junit.categories.DistributedTransactionsTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.util.Properties;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
 
 /**
  * Run the basic transaction functionality tests in TXJUnitTest after setting
  * "distributed-transactions" property to true
- *
  */
 @Category({IntegrationTest.class, DistributedTransactionsTest.class})
 public class DistTXJUnitTest extends TXJUnitTest {
 
-  public DistTXJUnitTest() {}
-
   @Override
   protected void createCache() throws Exception {
     Properties p = new Properties();
     p.setProperty(MCAST_PORT, "0"); // loner
     p.setProperty(DISTRIBUTED_TRANSACTIONS, "true");
+
     this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
+
     createRegion();
     this.txMgr = this.cache.getCacheTransactionManager();
-    assert (this.txMgr.isDistributed());
+
+    assertTrue(this.txMgr.isDistributed());
+
     this.listenerAfterCommit = 0;
     this.listenerAfterFailedCommit = 0;
     this.listenerAfterRollback = 0;
     this.listenerClose = 0;
   }
 
-  @Before
-  public void setUp() throws Exception {
-    createCache();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    closeCache();
-  }
-
   @Override
   @Test
   @Ignore

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistTXPersistentDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistTXPersistentDebugDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistTXPersistentDebugDUnitTest.java
index 5753f5c..d999da9 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistTXPersistentDebugDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistTXPersistentDebugDUnitTest.java
@@ -16,6 +16,9 @@ package org.apache.geode.disttx;
 
 import static org.apache.geode.test.dunit.Assert.*;
 
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.cache.AttributesFactory;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheTransactionManager;
@@ -31,8 +34,6 @@ import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 @Category(DistributedTest.class)
 public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
@@ -61,7 +62,7 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
     });
   }
 
-  protected void createPesistentPR(Object[] attributes) {
+  private void createPersistentPR(Object[] attributes) {
     dataStore1.invoke(DistTXPersistentDebugDUnitTest.class, "createPersistentPR", attributes);
     dataStore2.invoke(DistTXPersistentDebugDUnitTest.class, "createPersistentPR", attributes);
     // dataStore3.invoke(TxPersistentDebugDUnit.class, "createPR", attributes);
@@ -76,7 +77,7 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
         getPersistentPRAttributes(1, -1, basicGetCache(), 113, true));
   }
 
-  protected static RegionAttributes getPersistentPRAttributes(final int redundancy,
+  private static RegionAttributes getPersistentPRAttributes(final int redundancy,
       final int recoveryDelay, Cache cache, int numBuckets, boolean synchronous) {
     DiskStore ds = cache.findDiskStore("disk");
     if (ds == null) {
@@ -92,8 +93,7 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
     af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
     af.setDiskStoreName("disk");
     af.setDiskSynchronous(synchronous);
-    RegionAttributes attr = af.create();
-    return attr;
+    return af.create();
   }
 
   @Test
@@ -101,7 +101,8 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
     createCacheInAllVms();
     final String regionName = "persistentCustomerPRRegion";
     Object[] attrs = new Object[] {regionName};
-    createPesistentPR(attrs);
+    createPersistentPR(attrs);
+
     SerializableCallable TxOps = new SerializableCallable() {
       @Override
       public Object call() throws Exception {

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterJUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterJUnitTest.java
index 0a61b1f..dec2f88 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterJUnitTest.java
@@ -14,6 +14,13 @@
  */
 package org.apache.geode.disttx;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.TXWriterJUnitTest;
 import org.apache.geode.cache.AttributesFactory;
 import org.apache.geode.cache.CacheException;
@@ -24,11 +31,6 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.test.junit.categories.DistributedTransactionsTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.junit.experimental.categories.Category;
-
-import java.util.Properties;
-
-import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 
 /**
  * Same tests as that of {@link TXWriterJUnitTest} after setting "distributed-transactions" property
@@ -37,19 +39,22 @@ import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 @Category({IntegrationTest.class, DistributedTransactionsTest.class})
 public class DistTXWriterJUnitTest extends TXWriterJUnitTest {
 
-  public DistTXWriterJUnitTest() {}
-
+  @Override
   protected void createCache() throws CacheException {
-    Properties p = new Properties();
-    p.setProperty(MCAST_PORT, "0"); // loner
-    p.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
-    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
-    AttributesFactory<?, ?> af = new AttributesFactory<String, String>();
-    af.setScope(Scope.DISTRIBUTED_NO_ACK);
-    af.setIndexMaintenanceSynchronous(true);
-    this.region = this.cache.createRegion("TXTest", af.create());
+    Properties properties = new Properties();
+    properties.setProperty(MCAST_PORT, "0"); // loner
+    properties.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
+
+    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(properties));
+
+    AttributesFactory<String, String> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+    attributesFactory.setIndexMaintenanceSynchronous(true);
+
+    this.region = this.cache.createRegion("TXTest", attributesFactory.create());
     this.txMgr = this.cache.getCacheTransactionManager();
-    assert (this.txMgr.isDistributed());
+
+    assertTrue(this.txMgr.isDistributed());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterOOMEJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterOOMEJUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterOOMEJUnitTest.java
index b99d3fd..896530d 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterOOMEJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistTXWriterOOMEJUnitTest.java
@@ -14,21 +14,23 @@
  */
 package org.apache.geode.disttx;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.TXWriterOOMEJUnitTest;
 import org.apache.geode.cache.AttributesFactory;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Scope;
-import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.ConfigurationProperties;
+import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.test.junit.categories.DistributedTransactionsTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.junit.experimental.categories.Category;
-
-import java.util.Properties;
-
-import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 
 /**
  * Same tests as that of {@link TXWriterOOMEJUnitTest} after setting "distributed-transactions"
@@ -37,19 +39,22 @@ import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 @Category({IntegrationTest.class, DistributedTransactionsTest.class})
 public class DistTXWriterOOMEJUnitTest extends TXWriterOOMEJUnitTest {
 
-  public DistTXWriterOOMEJUnitTest() {}
-
+  @Override
   protected void createCache() throws CacheException {
-    Properties p = new Properties();
-    p.setProperty(MCAST_PORT, "0"); // loner
-    p.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
-    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
-    AttributesFactory<?, ?> af = new AttributesFactory<String, String>();
-    af.setScope(Scope.DISTRIBUTED_NO_ACK);
-    af.setIndexMaintenanceSynchronous(true);
-    this.region = this.cache.createRegion("TXTest", af.create());
+    Properties properties = new Properties();
+    properties.setProperty(MCAST_PORT, "0"); // loner
+    properties.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
+
+    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(properties));
+
+    AttributesFactory<String, String> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+    attributesFactory.setIndexMaintenanceSynchronous(true);
+
+    this.region = this.cache.createRegion("TXTest", attributesFactory.create());
     this.txMgr = this.cache.getCacheTransactionManager();
-    assert (this.txMgr.isDistributed());
+
+    assertTrue(this.txMgr.isDistributed());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/DistributedTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/DistributedTransactionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/DistributedTransactionDUnitTest.java
index 5471565..fe79801 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/DistributedTransactionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/DistributedTransactionDUnitTest.java
@@ -44,7 +44,6 @@ import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.i18n.LogWriterI18n;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.cache.BucketRegion;
-import org.apache.geode.internal.cache.CacheServerImpl;
 import org.apache.geode.internal.cache.DistTXState;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.LocalRegion;
@@ -1383,7 +1382,7 @@ public class DistributedTransactionDUnitTest extends JUnit4CacheTestCase {
       @Override
       public Object call() throws Exception {
         CountDownLatch cdl = new CountDownLatch(1);
-        GemFireCacheImpl.internalBeforeApplyChanges = new WaitRelease(cdl, "TX OP");
+        DistTXState.internalBeforeApplyChanges = new WaitRelease(cdl, "TX OP");
         return null;
       }
     };
@@ -1396,7 +1395,7 @@ public class DistributedTransactionDUnitTest extends JUnit4CacheTestCase {
       @Override
       public Object call() throws Exception {
         CountDownLatch cdl = new CountDownLatch(1);
-        GemFireCacheImpl.internalBeforeNonTXBasicPut = new WaitRelease(cdl, "NON TX OP");
+        DistTXState.internalBeforeNonTXBasicPut = new WaitRelease(cdl, "NON TX OP");
         return null;
       }
     };
@@ -1457,7 +1456,7 @@ public class DistributedTransactionDUnitTest extends JUnit4CacheTestCase {
     execute(secondary, new SerializableCallable() {
       @Override
       public Object call() throws Exception {
-        Runnable r = GemFireCacheImpl.internalBeforeNonTXBasicPut;
+        Runnable r = DistTXState.internalBeforeNonTXBasicPut;
         assert (r != null && r instanceof WaitRelease);
         WaitRelease e = (WaitRelease) r;
         e.release();
@@ -1469,7 +1468,7 @@ public class DistributedTransactionDUnitTest extends JUnit4CacheTestCase {
     execute(secondary, new SerializableCallable() {
       @Override
       public Object call() throws Exception {
-        Runnable r = GemFireCacheImpl.internalBeforeApplyChanges;
+        Runnable r = DistTXState.internalBeforeApplyChanges;
         assert (r != null && r instanceof WaitRelease);
         WaitRelease e = (WaitRelease) r;
         e.release();

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXJUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXJUnitTest.java
index f27c099..268a733 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXJUnitTest.java
@@ -14,6 +14,15 @@
  */
 package org.apache.geode.disttx;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.distributed.ConfigurationProperties;
@@ -22,33 +31,27 @@ import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.PRTXJUnitTest;
 import org.apache.geode.test.junit.categories.DistributedTransactionsTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.util.Properties;
-
-import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 
 /**
  * Same tests as that of {@link PRTXJUnitTest} after setting "distributed-transactions" property to
  * true
- *
  */
 @Category({IntegrationTest.class, DistributedTransactionsTest.class})
 public class PRDistTXJUnitTest extends PRTXJUnitTest {
 
-  public PRDistTXJUnitTest() {}
-
   @Override
   protected void createCache() throws Exception {
     Properties p = new Properties();
     p.setProperty(MCAST_PORT, "0"); // loner
     p.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
+
     this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
+
     createRegion();
     this.txMgr = this.cache.getCacheTransactionManager();
-    assert (this.txMgr.isDistributed());
+
+    assertTrue(this.txMgr.isDistributed());
+
     this.listenerAfterCommit = 0;
     this.listenerAfterFailedCommit = 0;
     this.listenerAfterRollback = 0;

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/internal/cache/PRTXJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PRTXJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PRTXJUnitTest.java
index d2bad64..1caffbd 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PRTXJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PRTXJUnitTest.java
@@ -12,9 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-/**
- * 
- */
 package org.apache.geode.internal.cache;
 
 import org.junit.Ignore;
@@ -30,132 +27,83 @@ import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.query.QueryException;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.cache.InternalRegionArguments;
-import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 
-/**
- *
- */
 @Category(IntegrationTest.class)
 public class PRTXJUnitTest extends TXJUnitTest {
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.geode.TXTest#createRegion()
-   */
   @Override
   protected void createRegion() throws Exception {
-    AttributesFactory af = new AttributesFactory();
-    af.setConcurrencyChecksEnabled(false); // test validation expects this behavior
-    af.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(3).create());
-    // this.region = this.cache.createRegion("PRTXJUnitTest", af.create());
-    this.region = new PRWithLocalOps("PRTXJUnitTest", af.create(), null, this.cache,
-        new InternalRegionArguments().setDestroyLockFlag(true).setRecreateFlag(false)
+    AttributesFactory attributesFactory = new AttributesFactory();
+    // test validation expects this behavior
+    attributesFactory.setConcurrencyChecksEnabled(false);
+    attributesFactory
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(3).create());
+
+    this.region = new PRWithLocalOps(getClass().getSimpleName(), attributesFactory.create(), null,
+        this.cache, new InternalRegionArguments().setDestroyLockFlag(true).setRecreateFlag(false)
             .setSnapshotInputStream(null).setImageTarget(null));
+
     ((PartitionedRegion) this.region).initialize(null, null, null);
     ((PartitionedRegion) this.region).postCreateRegion();
     this.cache.setRegionByPath(this.region.getFullPath(), (LocalRegion) this.region);
   }
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.geode.TXTest#checkUserAttributeConflict(org.apache.geode.internal.cache.
-   * TXManagerImpl)
-   */
   @Override
   protected void checkUserAttributeConflict(CacheTransactionManager txMgrImpl) {}
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see
-   * org.apache.geode.TXTest#checkSubRegionCollecection(org.apache.geode.internal.cache.LocalRegion)
-   */
   @Override
-  protected void checkSubRegionCollecection(Region reg1) {}
+  protected void checkSubRegionCollection(Region reg1) {}
 
   @Override
   @Ignore
   @Test
-  public void testTXAndQueries() throws CacheException, QueryException {
-    // TODO fix this?
-  }
+  public void testTXAndQueries() throws CacheException, QueryException {}
 
   @Override
   @Ignore
   @Test
-  public void testCollections() throws CacheException {
-    // TODO make PR iterators tx aware
-  }
+  public void testCollections() throws CacheException {}
 
   @Override
   @Ignore
   @Test
-  public void testTxAlgebra() throws CacheException {
-    // TODO Auto-generated method stub
-  }
+  public void testTxAlgebra() throws CacheException {}
 
   @Test
   public void testTxId() {
-    AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
-    af.setPartitionAttributes(
+    AttributesFactory<String, Integer> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setPartitionAttributes(
         new PartitionAttributesFactory<String, Integer>().setTotalNumBuckets(2).create());
-    Region<String, Integer> r = this.cache.createRegion("testTxId", af.create());
-    r.put("one", 1);
-    CacheTransactionManager mgr = this.cache.getTxManager();
-    mgr.begin();
-    r.put("two", 2);
-    mgr.getTransactionId();
-    mgr.rollback();
+
+    Region<String, Integer> region =
+        this.cache.createRegion("testTxId", attributesFactory.create());
+    region.put("one", 1);
+
+    CacheTransactionManager txManager = this.cache.getTxManager();
+    txManager.begin();
+    region.put("two", 2);
+    txManager.getTransactionId();
+    txManager.rollback();
   }
 
   private static class PRWithLocalOps extends PartitionedRegion {
 
-    /**
-     * @param regionname
-     * @param ra
-     * @param parentRegion
-     * @param cache
-     * @param internalRegionArgs
-     */
-    public PRWithLocalOps(String regionname, RegionAttributes ra, LocalRegion parentRegion,
+    PRWithLocalOps(String regionName, RegionAttributes ra, LocalRegion parentRegion,
         GemFireCacheImpl cache, InternalRegionArguments internalRegionArgs) {
-      super(regionname, ra, parentRegion, cache, internalRegionArgs);
+      super(regionName, ra, parentRegion, cache, internalRegionArgs);
     }
 
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.geode.internal.cache.PartitionedRegion#localDestroy(java.lang.Object,
-     * java.lang.Object)
-     */
     @Override
     public void localDestroy(Object key, Object callbackArgument) throws EntryNotFoundException {
       super.destroy(key, callbackArgument);
     }
 
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.geode.internal.cache.PartitionedRegion#localInvalidate(java.lang.Object,
-     * java.lang.Object)
-     */
     @Override
     public void localInvalidate(Object key, Object callbackArgument) throws EntryNotFoundException {
       super.invalidate(key, callbackArgument);
     }
 
-    /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.geode.internal.cache.PartitionedRegion#localInvalidateRegion(java.lang.Object)
-     */
     @Override
     public void localInvalidateRegion(Object callbackArgument) {
       super.invalidateRegion(callbackArgument);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java
index d57ce12..b7ee5c8 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java
@@ -14,31 +14,59 @@
  */
 package org.apache.geode.internal.cache.wan.parallel;
 
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.CancelCriterion;
-import org.apache.geode.cache.*;
-import org.apache.geode.internal.cache.*;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.internal.cache.BucketAdvisor;
+import org.apache.geode.internal.cache.BucketRegionQueue;
+import org.apache.geode.internal.cache.BucketRegionQueueHelper;
+import org.apache.geode.internal.cache.EvictionAttributesImpl;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.InternalRegionArguments;
+import org.apache.geode.internal.cache.KeyInfo;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionDataStore;
+import org.apache.geode.internal.cache.PartitionedRegionHelper;
+import org.apache.geode.internal.cache.PartitionedRegionStats;
+import org.apache.geode.internal.cache.ProxyBucketRegion;
+import org.apache.geode.internal.cache.RegionQueue;
 import org.apache.geode.internal.cache.lru.LRUAlgorithm;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
 import org.apache.geode.internal.cache.wan.AbstractGatewaySender;
 import org.apache.geode.internal.cache.wan.GatewaySenderEventImpl;
 import org.apache.geode.test.fake.Fakes;
 import org.apache.geode.test.junit.categories.UnitTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.util.*;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 @Category(UnitTest.class)
 public class ParallelQueueRemovalMessageJUnitTest {
 
+  private static final String GATEWAY_SENDER_ID = "ny";
+  private static final int BUCKET_ID = 85;
+  private static final long KEY = 198;
+
   private GemFireCacheImpl cache;
   private PartitionedRegion queueRegion;
   private AbstractGatewaySender sender;
@@ -46,10 +74,6 @@ public class ParallelQueueRemovalMessageJUnitTest {
   private BucketRegionQueue bucketRegionQueue;
   private BucketRegionQueueHelper bucketRegionQueueHelper;
 
-  private static String GATEWAY_SENDER_ID = "ny";
-  private static int BUCKET_ID = 85;
-  private static long KEY = 198l;
-
   @Before
   public void setUpGemFire() {
     createCache();
@@ -115,10 +139,10 @@ public class ParallelQueueRemovalMessageJUnitTest {
     when(this.queueRegion.getPartitionAttributes()).thenReturn(pa);
     when(this.queueRegion.getDataPolicy()).thenReturn(DataPolicy.PARTITION);
     when(pa.getColocatedWith()).thenReturn(null);
-    ProxyBucketRegion pbr = new ProxyBucketRegion(BUCKET_ID, this.queueRegion, pbrIra); // final
-                                                                                        // classes
-                                                                                        // cannot be
-                                                                                        // mocked
+
+    // final classes cannot be mocked
+    ProxyBucketRegion pbr = new ProxyBucketRegion(BUCKET_ID, this.queueRegion, pbrIra);
+
     when(ba.getProxyBucketRegion()).thenReturn(pbr);
 
     // Create RegionAttributes
@@ -182,11 +206,11 @@ public class ParallelQueueRemovalMessageJUnitTest {
     assertFalse(this.bucketRegionQueue.isInitialized());
 
     // Create a real ConcurrentParallelGatewaySenderQueue
-    ParallelGatewaySenderEventProcessor pgsep = createConcurrentParallelGatewaySenderQueue();
+    ParallelGatewaySenderEventProcessor processor = createConcurrentParallelGatewaySenderQueue();
 
     // Add a mock GatewaySenderEventImpl to the temp queue
     BlockingQueue<GatewaySenderEventImpl> tempQueue =
-        createTempQueueAndAddEvent(pgsep, mock(GatewaySenderEventImpl.class));
+        createTempQueueAndAddEvent(processor, mock(GatewaySenderEventImpl.class));
     assertEquals(1, tempQueue.size());
 
     // Create and process a ParallelQueueRemovalMessage (causes the failedBatchRemovalMessageKeys to
@@ -204,14 +228,14 @@ public class ParallelQueueRemovalMessageJUnitTest {
     assertEquals(0, this.bucketRegionQueue.size());
 
     // Create a real ConcurrentParallelGatewaySenderQueue
-    ParallelGatewaySenderEventProcessor pgsep = createConcurrentParallelGatewaySenderQueue();
+    ParallelGatewaySenderEventProcessor processor = createConcurrentParallelGatewaySenderQueue();
 
     // Add an event to the BucketRegionQueue and verify BucketRegionQueue state
-    GatewaySenderEventImpl gsei = this.bucketRegionQueueHelper.addEvent(KEY);
+    GatewaySenderEventImpl event = this.bucketRegionQueueHelper.addEvent(KEY);
     assertEquals(1, this.bucketRegionQueue.size());
 
     // Add a mock GatewaySenderEventImpl to the temp queue
-    BlockingQueue<GatewaySenderEventImpl> tempQueue = createTempQueueAndAddEvent(pgsep, gsei);
+    BlockingQueue<GatewaySenderEventImpl> tempQueue = createTempQueueAndAddEvent(processor, event);
     assertEquals(1, tempQueue.size());
 
     // Create and process a ParallelQueueRemovalMessage (causes the value of the entry to be set to
@@ -230,9 +254,9 @@ public class ParallelQueueRemovalMessageJUnitTest {
   }
 
   private void createAndProcessParallelQueueRemovalMessage() {
-    ParallelQueueRemovalMessage pqrm =
+    ParallelQueueRemovalMessage message =
         new ParallelQueueRemovalMessage(createRegionToDispatchedKeysMap());
-    pqrm.process(null);
+    message.process(null);
   }
 
   private HashMap<String, Map<Integer, List<Long>>> createRegionToDispatchedKeysMap() {
@@ -246,23 +270,23 @@ public class ParallelQueueRemovalMessageJUnitTest {
   }
 
   private ParallelGatewaySenderEventProcessor createConcurrentParallelGatewaySenderQueue() {
-    ParallelGatewaySenderEventProcessor pgsep = new ParallelGatewaySenderEventProcessor(sender);
-    ConcurrentParallelGatewaySenderQueue cpgsq = new ConcurrentParallelGatewaySenderQueue(sender,
-        new ParallelGatewaySenderEventProcessor[] {pgsep});
+    ParallelGatewaySenderEventProcessor processor = new ParallelGatewaySenderEventProcessor(sender);
+    ConcurrentParallelGatewaySenderQueue queue = new ConcurrentParallelGatewaySenderQueue(sender,
+        new ParallelGatewaySenderEventProcessor[] {processor});
     Set<RegionQueue> queues = new HashSet<>();
-    queues.add(cpgsq);
+    queues.add(queue);
     when(this.sender.getQueues()).thenReturn(queues);
-    return pgsep;
+    return processor;
   }
 
   private BlockingQueue<GatewaySenderEventImpl> createTempQueueAndAddEvent(
-      ParallelGatewaySenderEventProcessor pgsep, GatewaySenderEventImpl gsei) {
-    ParallelGatewaySenderQueue pgsq = (ParallelGatewaySenderQueue) pgsep.getQueue();
+      ParallelGatewaySenderEventProcessor processor, GatewaySenderEventImpl event) {
+    ParallelGatewaySenderQueue queue = (ParallelGatewaySenderQueue) processor.getQueue();
     Map<Integer, BlockingQueue<GatewaySenderEventImpl>> tempQueueMap =
-        pgsq.getBucketToTempQueueMap();
-    BlockingQueue<GatewaySenderEventImpl> tempQueue = new LinkedBlockingQueue();
-    when(gsei.getShadowKey()).thenReturn(KEY);
-    tempQueue.add(gsei);
+        queue.getBucketToTempQueueMap();
+    BlockingQueue<GatewaySenderEventImpl> tempQueue = new LinkedBlockingQueue<>();
+    when(event.getShadowKey()).thenReturn(KEY);
+    tempQueue.add(event);
     tempQueueMap.put(BUCKET_ID, tempQueue);
     return tempQueue;
   }
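
The "final classes cannot be mocked" comment above is why ProxyBucketRegion is constructed for real while its collaborators are Mockito mocks. A minimal, self-contained sketch of that pattern with hypothetical types (Collaborator and FinalHolder are illustrations, not Geode classes; the 150L value is arbitrary):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class Collaborator {
  Long getShadowKey() {
    return null;
  }
}

final class FinalHolder { // final: plain Mockito cannot mock it, so build the real object
  private final Collaborator collaborator;

  FinalHolder(Collaborator collaborator) {
    this.collaborator = collaborator;
  }

  Long key() {
    return collaborator.getShadowKey();
  }
}

class FinalClassTestSketch {
  FinalHolder createHolder() {
    Collaborator collaborator = mock(Collaborator.class); // non-final collaborator is mockable
    when(collaborator.getShadowKey()).thenReturn(150L); // stub it, like the shadow key above
    return new FinalHolder(collaborator); // real instance of the final class
  }
}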

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-cq/src/main/java/org/apache/geode/cache/query/internal/cq/CqServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/cache/query/internal/cq/CqServiceImpl.java b/geode-cq/src/main/java/org/apache/geode/cache/query/internal/cq/CqServiceImpl.java
index 570c06c..54cadcb 100644
--- a/geode-cq/src/main/java/org/apache/geode/cache/query/internal/cq/CqServiceImpl.java
+++ b/geode-cq/src/main/java/org/apache/geode/cache/query/internal/cq/CqServiceImpl.java
@@ -776,7 +776,7 @@ public final class CqServiceImpl implements CqService {
     if (clientProxyId == null) {
       throw new CqException(
           LocalizedStrings.CqService_UNABLE_TO_RETRIEVE_DURABLE_CQS_FOR_CLIENT_PROXY_ID
-              .toLocalizedString(null));
+              .toLocalizedString());
     }
     List<ServerCQ> cqs = getAllClientCqs(clientProxyId);
     ArrayList<String> durableClientCqs = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-cq/src/test/java/org/apache/geode/cache/query/dunit/QueryMonitorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/test/java/org/apache/geode/cache/query/dunit/QueryMonitorDUnitTest.java b/geode-cq/src/test/java/org/apache/geode/cache/query/dunit/QueryMonitorDUnitTest.java
index f298fae..9332388 100644
--- a/geode-cq/src/test/java/org/apache/geode/cache/query/dunit/QueryMonitorDUnitTest.java
+++ b/geode-cq/src/test/java/org/apache/geode/cache/query/dunit/QueryMonitorDUnitTest.java
@@ -220,12 +220,12 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
       Assert.fail("While starting CacheServer", ex);
     }
     Cache cache = getCache();
-    GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME = queryMonitorTime;
+    GemFireCacheImpl.getInstance().testMaxQueryExecutionTime = queryMonitorTime;
     cache.getLogger().fine("#### RUNNING TEST : " + testName);
     DefaultQuery.testHook = new QueryTimeoutHook(queryMonitorTime);
-    // ((GemFireCache)cache).TEST_MAX_QUERY_EXECUTION_TIME = queryMonitorTime;
+    // ((GemFireCache)cache).testMaxQueryExecutionTime = queryMonitorTime;
     System.out.println("MAX_QUERY_EXECUTION_TIME is set to: "
-        + ((GemFireCacheImpl) cache).TEST_MAX_QUERY_EXECUTION_TIME);
+        + ((GemFireCacheImpl) cache).testMaxQueryExecutionTime);
     return port;
   }
 
@@ -236,10 +236,10 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
         // Reset the test flag.
         Cache cache = getCache();
         DefaultQuery.testHook = null;
-        GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME = -1;
+        GemFireCacheImpl.getInstance().testMaxQueryExecutionTime = -1;
         stopBridgeServer(getCache());
         System.out.println("MAX_QUERY_EXECUTION_TIME is set to: "
-            + ((GemFireCacheImpl) cache).TEST_MAX_QUERY_EXECUTION_TIME);
+            + ((GemFireCacheImpl) cache).testMaxQueryExecutionTime);
       }
     };
     server.invoke(stopServer);
@@ -333,7 +333,7 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
   private void executeQueriesFromClient(int timeout) {
     try {
       ClientCache anyInstance = ClientCacheFactory.getAnyInstance();
-      ((GemFireCacheImpl) anyInstance).TEST_MAX_QUERY_EXECUTION_TIME = timeout;
+      ((GemFireCacheImpl) anyInstance).testMaxQueryExecutionTime = timeout;
       Pool pool = PoolManager.find(poolName);
       QueryService queryService = pool.getQueryService();
       executeQueriesAgainstQueryService(queryService);
@@ -882,7 +882,7 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
   protected CqQueryDUnitTest cqDUnitTest = new CqQueryDUnitTest();
 
   /**
-   * The following CQ test is added to make sure TEST_MAX_QUERY_EXECUTION_TIME is reset and is not
+   * The following CQ test is added to make sure testMaxQueryExecutionTime is reset and is not
    * affecting other query related tests.
    * 
    * @throws Exception
@@ -917,7 +917,7 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
       public void run2() throws CacheException {
         Cache cache = getCache();
         System.out.println("TEST CQ MAX_QUERY_EXECUTION_TIME is set to: "
-            + ((GemFireCacheImpl) cache).TEST_MAX_QUERY_EXECUTION_TIME);
+            + ((GemFireCacheImpl) cache).testMaxQueryExecutionTime);
 
         Region region1 = getRootRegion().getSubregion(cqDUnitTest.regions[0]);
         for (int i = 1; i <= 5; i++) {
@@ -942,7 +942,7 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
       public void run2() throws CacheException {
         Cache cache = getCache();
         System.out.println("TEST CQ MAX_QUERY_EXECUTION_TIME is set to: "
-            + ((GemFireCacheImpl) cache).TEST_MAX_QUERY_EXECUTION_TIME);
+            + ((GemFireCacheImpl) cache).testMaxQueryExecutionTime);
 
         Region region1 = getRootRegion().getSubregion(cqDUnitTest.regions[0]);
         for (int i = 1; i <= 5; i++) {
@@ -1127,7 +1127,7 @@ public class QueryMonitorDUnitTest extends JUnit4CacheTestCase {
               }
               break;
             }
-            // ((GemFireCache)cache).TEST_MAX_QUERY_EXECUTION_TIME = queryMonitorTime;
+            // ((GemFireCache)cache).testMaxQueryExecutionTime = queryMonitorTime;
           }
         };
     vm.invoke(validateThreadCnt);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
index 0449a45..3190cd7 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
@@ -38,7 +38,7 @@ import org.apache.geode.cache.execute.Execution;
 import org.apache.geode.cache.execute.FunctionException;
 import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.execute.ResultCollector;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.execute.util.FindRestEnabledServersFunction;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.rest.internal.web.controllers.support.RestServersResultCollector;
 import org.apache.geode.rest.internal.web.exception.GemfireRestException;
@@ -193,7 +193,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
 
     try {
       final ResultCollector<?, ?> results = function.withCollector(new RestServersResultCollector())
-          .execute(GemFireCacheImpl.FIND_REST_ENABLED_SERVERS_FUNCTION_ID);
+          .execute(FindRestEnabledServersFunction.FIND_REST_ENABLED_SERVERS_FUNCTION_ID);
       Object functionResult = results.getResult();
 
       if (functionResult instanceof List<?>) {
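
For reference, a minimal sketch of how a caller can now reach the function purely through its String ID, with no import of GemFireCacheImpl; the Execution argument is assumed to be obtained elsewhere (for example via FunctionService), and RestLookupSketch is a hypothetical class, not part of this commit:

import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.internal.cache.execute.util.FindRestEnabledServersFunction;

public class RestLookupSketch {
  /** Executes the REST-discovery function by ID and returns the collected result. */
  public Object findRestEnabledServers(Execution execution) {
    ResultCollector<?, ?> results =
        execution.execute(FindRestEnabledServersFunction.FIND_REST_ENABLED_SERVERS_FUNCTION_ID);
    return results.getResult();
  }
}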


[2/6] geode git commit: Safe refactorings

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
index 709308b..33a7f52 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
@@ -106,8 +106,8 @@ public interface InternalCache extends Cache, Extensible<Cache> {
 
   boolean requiresNotificationFromPR(PartitionedRegion r);
 
-  RegionAttributes invokeRegionBefore(LocalRegion parent, String name, RegionAttributes attrs,
-      InternalRegionArguments internalRegionArgs);
+  <K, V> RegionAttributes<K, V> invokeRegionBefore(LocalRegion parent, String name,
+      RegionAttributes<K, V> attrs, InternalRegionArguments internalRegionArgs);
 
   void invokeRegionAfter(LocalRegion region);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 45035d7..8c061b0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -5853,8 +5853,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       tx.txPutEntry(event, ifNew, false, false, null);
       return null;
     } else {
-      if (GemFireCacheImpl.internalBeforeNonTXBasicPut != null) {
-        GemFireCacheImpl.internalBeforeNonTXBasicPut.run();
+      if (DistTXState.internalBeforeNonTXBasicPut != null) {
+        DistTXState.internalBeforeNonTXBasicPut.run();
       }
 
       RegionEntry oldEntry = this.entries.basicPut(event, lastModified, ifNew, false, // ifOld
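
For context on the hook that moved from GemFireCacheImpl to DistTXState: a minimal sketch of how a test might drive it from inside a test method, assuming the field is a plain Runnable (as the null check plus run() call above suggests); the hook body here is only illustrative:

// Install the hook before exercising the non-TX put path, and always clear it afterwards.
DistTXState.internalBeforeNonTXBasicPut = new Runnable() {
  @Override
  public void run() {
    // e.g. record that the non-transactional basicPut path was reached, or inject a pause
  }
};
try {
  // perform the region put under test here
} finally {
  DistTXState.internalBeforeNonTXBasicPut = null;
}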

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/FindRestEnabledServersFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/FindRestEnabledServersFunction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/FindRestEnabledServersFunction.java
index 5da63ad..13d8e18 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/FindRestEnabledServersFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/FindRestEnabledServersFunction.java
@@ -34,6 +34,12 @@ import org.apache.geode.management.internal.RestAgent;
 
 public class FindRestEnabledServersFunction extends FunctionAdapter implements InternalEntity {
 
+  /**
+   * Identifies the internal function that is executed on each node to fetch the active
+   * REST service endpoints (servers).
+   */
+  public static final String FIND_REST_ENABLED_SERVERS_FUNCTION_ID =
+      FindRestEnabledServersFunction.class.getName();
   private static final long serialVersionUID = 7851518767859544678L;
 
 
@@ -61,7 +67,7 @@ public class FindRestEnabledServersFunction extends FunctionAdapter implements I
   }
 
   public String getId() {
-    return GemFireCacheImpl.FIND_REST_ENABLED_SERVERS_FUNCTION_ID;
+    return FIND_REST_ENABLED_SERVERS_FUNCTION_ID;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/PersistenceAdvisorImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/PersistenceAdvisorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/PersistenceAdvisorImpl.java
index 7e30141..fc95f0b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/PersistenceAdvisorImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/PersistenceAdvisorImpl.java
@@ -365,10 +365,10 @@ public class PersistenceAdvisorImpl implements PersistenceAdvisor {
 
   /**
    * Start listening for persistent view updates and apply any updates that have already happened.
-   * 
+   *
    * This method should be called after we have decided that there is no conflicting persistent
    * exception.
-   * 
+   *
    * Fix for bug 44045.
    */
   protected void beginUpdatingPersistentView() {
@@ -776,9 +776,9 @@ public class PersistenceAdvisorImpl implements PersistenceAdvisor {
   /**
    * Returns the member id of the member who has the latest copy of the persistent region. This may
    * be the local member ID if this member has the latest known copy.
-   * 
+   *
    * This method will block until the latest member is online.
-   * 
+   *
    * @throws ConflictingPersistentDataException if there are active members which are not based on
    *         the state that is persisted in this member.
    */

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
index a5f0fc2..b07ccba 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
@@ -1401,7 +1401,7 @@ public class CacheCreation implements InternalCache {
    * 
    * @see org.apache.geode.cache.Cache#getMembers(org.apache.geode.cache.Region)
    */
-  public Set<DistributedMember> getMembers(Region r) {
+  public Set<DistributedMember> getMembers(Region region) {
     return Collections.EMPTY_SET;
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java b/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
index d6a1efa..2b847d0 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/beans/MemberMBeanBridge.java
@@ -1270,7 +1270,6 @@ public class MemberMBeanBridge {
 
 
   /**
-   * 
    * @return the duration for which the member is up
    */
   public long getMemberUpTime() {
@@ -1278,25 +1277,23 @@ public class MemberMBeanBridge {
   }
 
   /**
-   * 
    * @return root region names
    */
   public String[] getRootRegionNames() {
-    Set<LocalRegion> listOfRootRegions = cache.rootRegions();
+    Set<Region<?, ?>> listOfRootRegions = cache.rootRegions();
     if (listOfRootRegions != null && listOfRootRegions.size() > 0) {
-      String[] regionStr = new String[listOfRootRegions.size()];
+      String[] regionNames = new String[listOfRootRegions.size()];
       int j = 0;
-      for (LocalRegion rg : listOfRootRegions) {
-        regionStr[j] = rg.getFullPath();
+      for (Region region : listOfRootRegions) {
+        regionNames[j] = region.getFullPath();
         j++;
       }
-      return regionStr;
+      return regionNames;
     }
     return ManagementConstants.NO_DATA_STRING;
   }
 
   /**
-   * 
    * @return Current GemFire version
    */
   public String getVersion() {

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/test/java/org/apache/geode/TXJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/TXJUnitTest.java b/geode-core/src/test/java/org/apache/geode/TXJUnitTest.java
index 49348bd..6ecb8ca 100644
--- a/geode-core/src/test/java/org/apache/geode/TXJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/TXJUnitTest.java
@@ -14,8 +14,68 @@
  */
 package org.apache.geode;
 
-import org.apache.geode.cache.*;
-import org.apache.geode.cache.query.*;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Properties;
+import java.util.Set;
+
+import javax.transaction.Synchronization;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.AttributesMutator;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheEvent;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.CacheLoader;
+import org.apache.geode.cache.CacheLoaderException;
+import org.apache.geode.cache.CacheTransactionManager;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.CommitConflictException;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.EntryExistsException;
+import org.apache.geode.cache.EntryNotFoundException;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.FailedSynchronizationException;
+import org.apache.geode.cache.LoaderHelper;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.cache.TimeoutException;
+import org.apache.geode.cache.TransactionEvent;
+import org.apache.geode.cache.TransactionException;
+import org.apache.geode.cache.TransactionId;
+import org.apache.geode.cache.TransactionListener;
+import org.apache.geode.cache.UnsupportedOperationInTransactionException;
+import org.apache.geode.cache.query.Index;
+import org.apache.geode.cache.query.IndexType;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.QueryException;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.query.internal.index.IndexManager;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.cache.util.TransactionListenerAdapter;
@@ -23,34 +83,29 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.NanoTimer;
-import org.apache.geode.internal.cache.*;
+import org.apache.geode.internal.cache.AbstractRegion;
+import org.apache.geode.internal.cache.CachePerfStats;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.LocalRegion;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.TXManagerImpl;
+import org.apache.geode.internal.cache.TXStateProxy;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.util.StopWatch;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.junit.*;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-
-import javax.transaction.Synchronization;
-import java.util.*;
-
-import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
-import static org.junit.Assert.*;
 
 /**
  * Tests basic transaction functionality
  *
  * @since GemFire 4.0
- *
  */
 @Category(IntegrationTest.class)
+@SuppressWarnings("deprecation")
 public class TXJUnitTest {
 
-  @Rule
-  public TestName testName = new TestName();
-
   private int cbCount;
   private TransactionEvent te;
+
   protected int listenerAfterCommit;
   protected int listenerAfterFailedCommit;
   protected int listenerAfterRollback;
@@ -58,16 +113,21 @@ public class TXJUnitTest {
   protected CacheTransactionManager txMgr;
 
   protected GemFireCacheImpl cache;
-  protected Region region;
+  protected Region<String, String> region;
+
+  @Rule
+  public TestName testName = new TestName();
 
   private boolean isPR() {
     return (this.region instanceof PartitionedRegion);
   }
 
   protected void createCache() throws Exception {
-    Properties p = new Properties();
-    p.setProperty(MCAST_PORT, "0"); // loner
-    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p));
+    Properties properties = new Properties();
+    properties.setProperty(MCAST_PORT, "0"); // loner
+
+    this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(properties));
+
     createRegion();
     this.txMgr = this.cache.getCacheTransactionManager();
     this.listenerAfterCommit = 0;
@@ -76,15 +136,13 @@ public class TXJUnitTest {
     this.listenerClose = 0;
   }
 
-  /**
-   * 
-   */
   protected void createRegion() throws Exception {
-    AttributesFactory af = new AttributesFactory();
-    af.setScope(Scope.DISTRIBUTED_NO_ACK);
-    af.setConcurrencyChecksEnabled(false); // test validation expects this behavior
-    af.setIndexMaintenanceSynchronous(true);
-    this.region = this.cache.createRegion("TXJUnitTest", af.create());
+    AttributesFactory<String, String> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+    attributesFactory.setConcurrencyChecksEnabled(false); // test validation expects this behavior
+    attributesFactory.setIndexMaintenanceSynchronous(true);
+
+    this.region = this.cache.createRegion(getClass().getSimpleName(), attributesFactory.create());
   }
 
   protected void closeCache() {
@@ -104,12 +162,12 @@ public class TXJUnitTest {
   }
 
   @Before
-  public void setUp() throws Exception {
+  public void setUpTXJUnitTest() throws Exception {
     createCache();
   }
 
   @After
-  public void tearDown() throws Exception {
+  public void tearDownTXJUnitTest() throws Exception {
     closeCache();
   }
 
@@ -358,31 +416,37 @@ public class TXJUnitTest {
     final CachePerfStats stats = this.cache.getCachePerfStats();
     int txCommitChanges;
     TransactionId myTxId;
-    AttributesFactory af = new AttributesFactory();
-    af.setScope(Scope.DISTRIBUTED_NO_ACK);
-    Region reg1 = this.region;
-    Region reg2 = this.cache.createRegion(getUniqueName(), af.create());
+
+    AttributesFactory<String, String> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+
+    Region<String, String> reg1 = this.region;
+    Region<String, String> reg2 =
+        this.cache.createRegion(getUniqueName(), attributesFactory.create());
 
     this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         listenerAfterCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         listenerAfterFailedCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         listenerAfterRollback = 1;
         te = event;
       }
 
+      @Override
       public void close() {
         listenerClose = 1;
       }
-
     });
 
     // see if commits work
@@ -406,12 +470,11 @@ public class TXJUnitTest {
     assertEquals("value2", reg2.get("key2"));
     assertEquals(txCommitChanges + 2, stats.getTxCommitChanges());
     {
-      Collection creates = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(2, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1 || ev.getRegion() == reg2);
         if (ev.getRegion() == reg1) {
@@ -442,28 +505,31 @@ public class TXJUnitTest {
   @Test
   public void testTxEvent() throws CacheException {
     TransactionId myTxId;
-    Region reg1 = this.region;
+    Region<String, String> reg1 = this.region;
 
     this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         listenerAfterCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         listenerAfterFailedCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         listenerAfterRollback = 1;
         te = event;
       }
 
+      @Override
       public void close() {
         listenerClose = 1;
       }
-
     });
 
     // make sure each operation has the correct transaction event
@@ -479,12 +545,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -515,12 +580,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getPutEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getPutEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -551,12 +615,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -587,12 +650,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -624,12 +686,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -662,12 +723,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -699,12 +759,11 @@ public class TXJUnitTest {
     {
       Cache teCache = this.te.getCache();
       assertEquals(teCache, this.cache);
-      Collection creates = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> creates = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, creates.size());
-      Iterator it = creates.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : creates) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -726,7 +785,7 @@ public class TXJUnitTest {
   }
 
   private static class CountingCallBackValidator {
-    ArrayList asserts;
+    List<Object> asserts;
     final String createWriterAssert = "create writer Assert";
     final String createListenerAssert = "create listener Assert";
     final String updateWriterAssert = "update writer Assert";
@@ -743,7 +802,7 @@ public class TXJUnitTest {
     CountingCallBackValidator(CountingCacheListener cl, CountingCacheWriter cw) {
       this.cl = cl;
       this.cw = cw;
-      this.asserts = new ArrayList(8);
+      this.asserts = new ArrayList<>(8);
     }
 
     void assertCreateWriterCnt(int cnt) {
@@ -753,7 +812,7 @@ public class TXJUnitTest {
     void assertCreateWriterCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(createWriterAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cw.getBeforeCreateCalls());
     }
@@ -765,7 +824,7 @@ public class TXJUnitTest {
     void assertCreateListenerCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(createListenerAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cl.getAfterCreateCalls());
     }
@@ -777,7 +836,7 @@ public class TXJUnitTest {
     void assertDestroyWriterCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(destroyWriterAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cw.getBeforeDestroyCalls(false));
     }
@@ -789,7 +848,7 @@ public class TXJUnitTest {
     void assertDestroyListenerCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(destroyListenerAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cl.getAfterDestroyCalls(false));
     }
@@ -801,7 +860,7 @@ public class TXJUnitTest {
     void assertLocalDestroyWriterCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(localDestroyWriterAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(0, this.cw.getBeforeDestroyCalls(true));
     }
@@ -813,7 +872,7 @@ public class TXJUnitTest {
     void assertLocalDestroyListenerCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(localDestroyListenerAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cl.getAfterDestroyCalls(true));
     }
@@ -825,7 +884,7 @@ public class TXJUnitTest {
     void assertUpdateWriterCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(updateWriterAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cw.getBeforeUpdateCalls());
     }
@@ -837,7 +896,7 @@ public class TXJUnitTest {
     void assertUpdateListenerCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(updateListenerAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cl.getAfterUpdateCalls());
     }
@@ -849,7 +908,7 @@ public class TXJUnitTest {
     void assertInvalidateCnt(int cnt, boolean remember) {
       if (remember) {
         this.asserts.add(invalAssert);
-        this.asserts.add(new Integer(cnt));
+        this.asserts.add(cnt);
       }
       assertEquals(cnt, this.cl.getAfterInvalidateCalls());
     }
@@ -864,7 +923,7 @@ public class TXJUnitTest {
         assertTrue("CountingCallBackValidator reassert, did not have an associated count",
             assertItr.hasNext());
         count = (Integer) assertItr.next();
-        cnt = count.intValue();
+        cnt = count;
         if (assertType.equals(createWriterAssert)) {
           this.assertCreateWriterCnt(cnt, false);
         } else if (assertType.equals(createListenerAssert)) {
@@ -896,77 +955,86 @@ public class TXJUnitTest {
     }
   }
 
-  private static interface CountingCacheListener extends CacheListener {
-    public int getAfterCreateCalls();
+  private interface CountingCacheListener extends CacheListener {
+    int getAfterCreateCalls();
 
-    public int getAfterUpdateCalls();
+    int getAfterUpdateCalls();
 
-    public int getAfterInvalidateCalls();
+    int getAfterInvalidateCalls();
 
-    public int getAfterDestroyCalls(boolean fetchLocal);
+    int getAfterDestroyCalls(boolean fetchLocal);
 
-    public void reset();
+    void reset();
   }
 
-  private static interface CountingCacheWriter extends CacheWriter {
-    public int getBeforeCreateCalls();
+  private interface CountingCacheWriter extends CacheWriter {
+    int getBeforeCreateCalls();
 
-    public int getBeforeUpdateCalls();
+    int getBeforeUpdateCalls();
 
-    public int getBeforeDestroyCalls(boolean fetchLocal);
+    int getBeforeDestroyCalls(boolean fetchLocal);
 
-    public void reset();
+    void reset();
   }
 
   @Test
   public void testTxAlgebra() throws CacheException {
     TransactionId myTxId;
-    Region reg1 = this.region;
+    Region<String, String> reg1 = this.region;
 
     this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         listenerAfterCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         listenerAfterFailedCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         listenerAfterRollback = 1;
         te = event;
       }
 
+      @Override
       public void close() {
         listenerClose = 1;
       }
-
     });
-    AttributesMutator mutator = this.region.getAttributesMutator();
+    AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
     CountingCacheListener cntListener = new CountingCacheListener() {
       volatile int aCreateCalls, aUpdateCalls, aInvalidateCalls, aDestroyCalls, aLocalDestroyCalls;
 
+      @Override
       public void close() {}
 
+      @Override
       public void reset() {
         this.aCreateCalls = this.aUpdateCalls =
             this.aInvalidateCalls = this.aDestroyCalls = this.aLocalDestroyCalls = 0;
       }
 
+      @Override
       public void afterCreate(EntryEvent e) {
         ++this.aCreateCalls;
       }
 
+      @Override
       public void afterUpdate(EntryEvent e) {
         ++this.aUpdateCalls;
       }
 
+      @Override
       public void afterInvalidate(EntryEvent e) {
         ++this.aInvalidateCalls;
       }
 
+      @Override
       public void afterDestroy(EntryEvent e) {
         if (e.getOperation().isDistributed()) {
           ++this.aDestroyCalls;
@@ -975,34 +1043,43 @@ public class TXJUnitTest {
         }
       }
 
+      @Override
       public void afterRegionInvalidate(RegionEvent e) {
         fail("Unexpected afterRegionInvalidate in testTxAlgebra");
       }
 
+      @Override
       public void afterRegionDestroy(RegionEvent e) {
         if (!e.getOperation().isClose()) {
           fail("Unexpected afterRegionDestroy in testTxAlgebra");
         }
       }
 
+      @Override
       public void afterRegionClear(RegionEvent event) {}
 
+      @Override
       public void afterRegionCreate(RegionEvent event) {}
 
+      @Override
       public void afterRegionLive(RegionEvent event) {}
 
+      @Override
       public int getAfterCreateCalls() {
         return this.aCreateCalls;
       }
 
+      @Override
       public int getAfterUpdateCalls() {
         return this.aUpdateCalls;
       }
 
+      @Override
       public int getAfterInvalidateCalls() {
         return this.aInvalidateCalls;
       }
 
+      @Override
       public int getAfterDestroyCalls(boolean fetchLocal) {
         return fetchLocal ? this.aLocalDestroyCalls : this.aDestroyCalls;
       }
@@ -1011,40 +1088,50 @@ public class TXJUnitTest {
     CountingCacheWriter cntWriter = new CountingCacheWriter() {
       int bCreateCalls, bUpdateCalls, bDestroyCalls, bLocalDestroyCalls;
 
+      @Override
       public void close() {}
 
+      @Override
       public void reset() {
         this.bCreateCalls = this.bUpdateCalls = this.bDestroyCalls = this.bLocalDestroyCalls = 0;
       }
 
+      @Override
       public void beforeCreate(EntryEvent e) {
         ++this.bCreateCalls;
       }
 
+      @Override
       public void beforeUpdate(EntryEvent e) {
         ++this.bUpdateCalls;
       }
 
+      @Override
       public void beforeDestroy(EntryEvent e) {
         ++this.bDestroyCalls;
       }
 
+      @Override
       public void beforeRegionDestroy(RegionEvent e) {
         fail("Unexpected beforeRegionDestroy in testTxAlgebra");
       }
 
+      @Override
       public void beforeRegionClear(RegionEvent e) {
         fail("Unexpected beforeRegionClear in testTxAlgebra");
       }
 
+      @Override
       public int getBeforeCreateCalls() {
         return this.bCreateCalls;
       }
 
+      @Override
       public int getBeforeUpdateCalls() {
         return this.bUpdateCalls;
       }
 
+      @Override
       public int getBeforeDestroyCalls(boolean fetchLocal) {
         return fetchLocal ? this.bLocalDestroyCalls : this.bDestroyCalls;
       }
@@ -1082,12 +1169,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(0, this.te.getDestroyEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1127,12 +1213,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(0, this.te.getDestroyEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1151,7 +1236,7 @@ public class TXJUnitTest {
     }
     reg1.localDestroy("key1");
 
-    // @todo mitch implement the following
+    // TODO: mitch implement the following
     // check LI + DI -> NOOP
     // check DI + LI -> NOOP
     // check DI + DI -> NOOP
@@ -1160,7 +1245,6 @@ public class TXJUnitTest {
     // check C + DD -> NOOP
     callbackVal.reset();
     this.txMgr.begin();
-    myTxId = this.txMgr.getTransactionId();
     reg1.create("key1", "value0");
     callbackVal.assertCreateWriterCnt(1);
     reg1.destroy("key1");
@@ -1198,12 +1282,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1253,12 +1336,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1280,7 +1362,6 @@ public class TXJUnitTest {
     // Check C + LI + LD -> NOOP
     callbackVal.reset();
     this.txMgr.begin();
-    myTxId = this.txMgr.getTransactionId();
     reg1.create("key1", "value1");
     callbackVal.assertCreateWriterCnt(1);
     reg1.localInvalidate("key1");
@@ -1302,7 +1383,6 @@ public class TXJUnitTest {
     // Check C + LI + DD -> NOOP
     callbackVal.reset();
     this.txMgr.begin();
-    myTxId = this.txMgr.getTransactionId();
     reg1.create("key1", "value1");
     callbackVal.assertCreateWriterCnt(1);
     reg1.localInvalidate("key1");
@@ -1324,7 +1404,6 @@ public class TXJUnitTest {
     // check C + LD -> NOOP
     callbackVal.reset();
     this.txMgr.begin();
-    myTxId = this.txMgr.getTransactionId();
     reg1.create("key1", "value0");
     callbackVal.assertCreateWriterCnt(1);
     reg1.localDestroy("key1");
@@ -1388,12 +1467,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1433,12 +1511,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1486,12 +1563,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getPutEvents();
+      List<EntryEvent<?, ?>> events = this.te.getPutEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1532,12 +1608,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1577,12 +1652,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1622,12 +1696,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1677,12 +1750,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getPutEvents();
+      List<EntryEvent<?, ?>> events = this.te.getPutEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1724,12 +1796,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getCreateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1769,12 +1840,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getCreateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1813,12 +1883,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1885,12 +1954,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1931,12 +1999,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -1981,12 +2048,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getPutEvents();
+      List<EntryEvent<?, ?>> events = this.te.getPutEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2024,12 +2090,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2067,12 +2132,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2142,12 +2206,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2190,12 +2253,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2241,12 +2303,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getPutEvents();
+      List<EntryEvent<?, ?>> events = this.te.getPutEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2285,12 +2346,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2328,12 +2388,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getInvalidateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getDestroyEvents();
+      List<EntryEvent<?, ?>> events = this.te.getDestroyEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2403,12 +2462,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2451,12 +2509,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2501,12 +2558,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getCreateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2549,12 +2605,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getCreateEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getInvalidateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getInvalidateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2594,12 +2649,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2639,12 +2693,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2684,12 +2737,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2729,12 +2781,11 @@ public class TXJUnitTest {
     assertEquals(0, this.te.getDestroyEvents().size());
     assertEquals(1, this.te.getEvents().size());
     {
-      Collection events = this.te.getCreateEvents();
+      List<EntryEvent<?, ?>> events = this.te.getCreateEvents();
       assertEquals(myTxId, this.te.getTransactionId());
       assertEquals(1, events.size());
-      Iterator it = events.iterator();
-      while (it.hasNext()) {
-        EntryEvent ev = (EntryEvent) it.next();
+
+      for (EntryEvent ev : events) {
         assertEquals(myTxId, ev.getTransactionId());
         assertTrue(ev.getRegion() == reg1);
         assertEquals("key1", ev.getKey());
@@ -2898,25 +2949,28 @@ public class TXJUnitTest {
   public void testListener() {
     assertTrue(this.txMgr.getListener() == null);
     TransactionListener oldListener = this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         listenerAfterCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         listenerAfterFailedCommit = 1;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         listenerAfterRollback = 1;
         te = event;
       }
 
+      @Override
       public void close() {
         listenerClose = 1;
       }
-
     });
     assertTrue(oldListener == null);
     this.txMgr.begin();
@@ -2949,20 +3003,24 @@ public class TXJUnitTest {
 
     assertEquals(0, this.listenerClose);
     oldListener = this.txMgr.setListener(new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         listenerAfterCommit = 2;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         listenerAfterFailedCommit = 2;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         listenerAfterRollback = 2;
         te = event;
       }
 
+      @Override
       public void close() {
         listenerClose = 2;
       }
@@ -2988,57 +3046,70 @@ public class TXJUnitTest {
   @Test
   public void testNoCallbacksOnRollback() throws CacheException {
     // install listeners
-    AttributesMutator mutator = this.region.getAttributesMutator();
-    mutator.setCacheListener(new CacheListenerAdapter() {
+    AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
+    mutator.setCacheListener(new CacheListenerAdapter<String, String>() {
+      @Override
       public void close() {
         cbCount++;
       }
 
+      @Override
       public void afterCreate(EntryEvent event) {
         cbCount++;
       }
 
+      @Override
       public void afterUpdate(EntryEvent event) {
         cbCount++;
       }
 
+      @Override
       public void afterInvalidate(EntryEvent event) {
         cbCount++;
       }
 
+      @Override
       public void afterDestroy(EntryEvent event) {
         cbCount++;
       }
 
+      @Override
       public void afterRegionInvalidate(RegionEvent event) {
         cbCount++;
       }
 
+      @Override
       public void afterRegionDestroy(RegionEvent event) {
         cbCount++;
       }
     });
-    mutator.setCacheWriter(new CacheWriter() {
+    mutator.setCacheWriter(new CacheWriter<String, String>() {
+      @Override
       public void close() {
         cbCount++;
       }
 
+      @Override
       public void beforeUpdate(EntryEvent event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeCreate(EntryEvent event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeDestroy(EntryEvent event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeRegionDestroy(RegionEvent event) throws CacheWriterException {
         cbCount++;
       }
 
+      @Override
       public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
         cbCount++;
       }
@@ -3094,10 +3165,9 @@ public class TXJUnitTest {
     this.region.localDestroy("key1");
   }
 
-  //
-  // TXCallBackValidator is a container for holding state for validating Cache
-  // callbacks
-  //
+  /**
+   * TXCallBackValidator is a container for holding state for validating Cache callbacks
+   */
   private class TXCallBackValidator {
     boolean passedValidation;
     boolean suspendValidation;
@@ -3116,8 +3186,9 @@ public class TXJUnitTest {
     boolean isInvalidate;
     Object callBackArg;
 
-    // EntryEvent, CallCount validator for
-    // callbacks (CacheWriter, CacheListener
+    /**
+     * EntryEvent, CallCount validator for callbacks (CacheWriter, CacheListener)
+     */
     boolean validate(EntryEvent event, int cnt) {
       if (this.isSuspendValidation()) {
         return true;
@@ -3135,8 +3206,7 @@ public class TXJUnitTest {
       assertNotNull(event.getRegion().getCache());
       assertNotNull(event.getRegion().getCache().getCacheTransactionManager());
       assertEquals(this.getTXId(), event.getTransactionId());
-      // assertIndexDetailsEquals(event.getTransactionId(),
-      // event.getRegion().getCache().getCacheTransactionManager().getTransactionId(), );
+
       if (!isPR())
         assertEquals("IsDistributed Assertion!", this.isDistributed(),
             event.getOperation().isDistributed());
@@ -3162,12 +3232,6 @@ public class TXJUnitTest {
       return oldVal;
     }
 
-    // int getCount() {return this.callCount;}
-    // int setCount(int newVal) {
-    // int oldVal = this.callCount;
-    // this.callCount = newVal;
-    // return oldVal;
-    // }
     void setKey(Object key) {
       this.key = key;
     }
@@ -3278,35 +3342,39 @@ public class TXJUnitTest {
       return this.passedValidation;
     }
   }
-  private static interface ValidatableCacheListener extends CacheListener {
-    public void setValidator(TXCallBackValidator v);
 
-    public void validate();
+  private interface ValidatableCacheListener extends CacheListener {
+    void setValidator(TXCallBackValidator v);
 
-    public void validateNoEvents();
+    void validate();
 
-    public void reset();
+    void validateNoEvents();
 
-    public void setExpectedCount(int count);
+    void reset();
 
-    public int getCallCount();
+    void setExpectedCount(int count);
+
+    int getCallCount();
   }
-  private static interface ValidatableCacheWriter extends CacheWriter {
-    public void setValidator(TXCallBackValidator v);
 
-    public int getCallCount();
+  private interface ValidatableCacheWriter extends CacheWriter {
+    void setValidator(TXCallBackValidator v);
+
+    int getCallCount();
 
-    public void localDestroyMakeup(int count);
+    void localDestroyMakeup(int count);
 
-    public void validate();
+    void validate();
 
-    public void reset();
+    void reset();
 
-    public void validateNoEvents();
+    void validateNoEvents();
   }
 
-  // Test to make sure CacheListener callbacks are called in place with
-  // the CacheEvents properly constructed
+  /**
+   * Test to make sure CacheListener callbacks are called in place with the CacheEvents properly
+   * constructed
+   */
   @Test
   public void testCacheCallbacks() throws CacheException {
     final String key1 = "Key1";
@@ -3314,7 +3382,7 @@ public class TXJUnitTest {
     final String value2 = "value2";
     final String callBackArg = "call back arg";
     // install listeners
-    AttributesMutator mutator = this.region.getAttributesMutator();
+    AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
 
     TXCallBackValidator cbv = new TXCallBackValidator();
 
@@ -3325,20 +3393,24 @@ public class TXJUnitTest {
       int prevCallCount;
       EntryEvent lastEvent;
 
+      @Override
       public void validate() {
         this.v.validate(this.lastEvent, this.callCount);
       }
 
-      public void validate(EntryEvent event) {
+      void validate(EntryEvent event) {
         this.v.validate(event, ++this.callCount);
       }
 
+      @Override
       public void setValidator(TXCallBackValidator v) {
         this.v = v;
       }
 
+      @Override
       public void close() {}
 
+      @Override
       public void afterCreate(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3353,6 +3425,7 @@ public class TXJUnitTest {
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void afterUpdate(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3367,6 +3440,7 @@ public class TXJUnitTest {
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void afterInvalidate(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3374,13 +3448,14 @@ public class TXJUnitTest {
         }
         validate(event);
         this.v.setPassedValidation(false);
-        assertTrue("IsInvaldiate Assertion!", this.v.isInvalidate());
+        assertTrue("IsInvalidate Assertion!", this.v.isInvalidate());
         assertTrue(event.getRegion().containsKey(this.v.getKey()));
         assertTrue(!event.getRegion().containsValueForKey(this.v.getKey()));
         assertNull(event.getRegion().getEntry(event.getKey()).getValue());
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void afterDestroy(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3395,38 +3470,45 @@ public class TXJUnitTest {
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void afterRegionInvalidate(RegionEvent event) {
-        fail("Unexpected invokation of afterRegionInvalidate");
+        fail("Unexpected invocation of afterRegionInvalidate");
       }
 
+      @Override
       public void afterRegionDestroy(RegionEvent event) {
         if (!event.getOperation().isClose()) {
-          fail("Unexpected invokation of afterRegionDestroy");
+          fail("Unexpected invocation of afterRegionDestroy");
         }
       }
 
-      public void afterRegionClear(RegionEvent event) {
-
-      }
+      @Override
+      public void afterRegionClear(RegionEvent event) {}
 
+      @Override
       public void afterRegionCreate(RegionEvent event) {}
 
+      @Override
       public void afterRegionLive(RegionEvent event) {}
 
+      @Override
       public void reset() {
         lastEvent = null;
         prevCallCount = callCount;
       }
 
+      @Override
       public void validateNoEvents() {
         assertNull("Did not expect listener callback", lastEvent);
         assertEquals(prevCallCount, callCount);
       }
 
+      @Override
       public void setExpectedCount(int count) {
         callCount = count;
       }
 
+      @Override
       public int getCallCount() {
         return callCount;
       }
@@ -3442,28 +3524,34 @@ public class TXJUnitTest {
       int prevCallCount;
       EntryEvent lastEvent;
 
+      @Override
       public int getCallCount() {
         return this.callCount;
       }
 
+      @Override
       public void localDestroyMakeup(int count) {
         this.callCount += count;
       }
 
+      @Override
       public void validate() {
         this.v.validate(this.lastEvent, this.callCount);
       }
 
-      public void validate(EntryEvent event) {
+      void validate(EntryEvent event) {
         this.v.validate(event, ++this.callCount);
       }
 
+      @Override
       public void setValidator(TXCallBackValidator v) {
         this.v = v;
       }
 
+      @Override
       public void close() {}
 
+      @Override
       public void beforeCreate(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3478,6 +3566,7 @@ public class TXJUnitTest {
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void beforeUpdate(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3487,11 +3576,12 @@ public class TXJUnitTest {
         this.v.setPassedValidation(false);
         assertTrue("IsUpdate Assertion!", this.v.isUpdate());
         assertTrue(event.getRegion().containsKey(this.v.getKey()));
-        // Can not assert the following line, as the value being update may be invalide
+        // Cannot assert the following line, as the value being updated may be invalid
         // assertTrue(event.getRegion().containsValueForKey(this.v.getKey()));
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void beforeDestroy(EntryEvent event) {
         lastEvent = event;
         if (this.v.isSuspendValidation()) {
@@ -3504,19 +3594,23 @@ public class TXJUnitTest {
         this.v.setPassedValidation(true);
       }
 
+      @Override
       public void beforeRegionDestroy(RegionEvent event) {
         fail("Unexpected invocation of beforeRegionDestroy");
       }
 
+      @Override
       public void beforeRegionClear(RegionEvent event) {
         fail("Unexpected invocation of beforeRegionClear");
       }
 
+      @Override
       public void reset() {
         lastEvent = null;
         prevCallCount = callCount;
       }
 
+      @Override
       public void validateNoEvents() {
         assertNull("Did not expect a writer event", lastEvent);
         assertEquals(prevCallCount, callCount);
@@ -3529,10 +3623,12 @@ public class TXJUnitTest {
     mutator.setCacheLoader(new CacheLoader() {
       int count = 0;
 
+      @Override
       public Object load(LoaderHelper helper) throws CacheLoaderException {
-        return new Integer(count++);
+        return count++;
       }
 
+      @Override
       public void close() {}
     });
 
@@ -3739,7 +3835,7 @@ public class TXJUnitTest {
 
     // Create load Event tests
     int loaderValCheck = 0;
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     cbv.setCallBackArg(null);
     cbv.setOldValue(null, false);
     cbv.setIsDistributed(true);
@@ -3760,7 +3856,7 @@ public class TXJUnitTest {
     vCl.reset();
     this.txMgr.begin();
     cbv.setTXId(txMgr.getTransactionId());
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     cbv.setExpectedCount(appCallCount++);
     this.region.get(key1);
     this.txMgr.rollback();
@@ -3771,7 +3867,7 @@ public class TXJUnitTest {
 
     this.txMgr.begin();
     cbv.setTXId(txMgr.getTransactionId());
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     cbv.setExpectedCount(appCallCount++);
     this.region.get(key1);
     vCw.validate();
@@ -3790,7 +3886,7 @@ public class TXJUnitTest {
     cbv.suspendValidation(false);
     assertTrue(this.region.containsKey(key1));
     assertTrue(!this.region.containsValueForKey(key1));
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     cbv.setOldValue(null, false);
     cbv.setIsDistributed(true);
     cbv.setCallBackArg(null);
@@ -3813,7 +3909,7 @@ public class TXJUnitTest {
     this.txMgr.begin();
     cbv.setTXId(txMgr.getTransactionId());
     cbv.setExpectedCount(appCallCount++);
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     this.region.get(key1);
     vCw.validate();
     vCw.reset();
@@ -3828,7 +3924,7 @@ public class TXJUnitTest {
     this.txMgr.begin();
     cbv.setTXId(txMgr.getTransactionId());
     cbv.setExpectedCount(appCallCount++);
-    cbv.setNewValue(new Integer(loaderValCheck++), false);
+    cbv.setNewValue(loaderValCheck++, false);
     this.region.get(key1);
     this.txMgr.rollback();
     assertTrue("TX Invalidate Validation Assertion", cbv.passedValidation());
@@ -3901,9 +3997,9 @@ public class TXJUnitTest {
 
   @Test
   public void testCollections() throws CacheException {
-    Region reg1 = this.region;
+    Region<String, String> reg1 = this.region;
 
-    checkSubRegionCollecection(reg1);
+    checkSubRegionCollection(reg1);
 
     {
       Collection nonTxKeys = reg1.keySet();
@@ -3955,28 +4051,20 @@ public class TXJUnitTest {
         assertTrue(!txIt.hasNext());
       }
       reg1.invalidate("key1");
-      // assertIndexDetailsEquals(0, nonTxKeys.size());
       assertEquals(1, txKeys.size());
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(0, txValues.size());
       assertTrue(txKeys.contains("key1"));
       assertTrue(!txValues.contains("value1"));
       reg1.create("key2", "value2");
       reg1.create("key3", "value3");
-      // assertIndexDetailsEquals(0, nonTxKeys.size());
       assertEquals(3, txKeys.size());
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(2, txValues.size());
       reg1.put("key1", "value1");
-      // assertIndexDetailsEquals(0, nonTxKeys.size());
       assertEquals(3, txKeys.size());
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(3, txValues.size());
       reg1.localInvalidate("key2");
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(2, txValues.size());
       reg1.invalidate("key1");
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(1, txValues.size());
       reg1.destroy("key2");
       reg1.destroy("key3");
@@ -4011,11 +4099,9 @@ public class TXJUnitTest {
       txIt.hasNext();
     }
     {
-      // Collection nonTxValues = reg1.values();
       this.txMgr.begin();
       reg1.create("key1", "value1");
       Collection txValues = reg1.values();
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       assertEquals(1, txValues.size());
       assertTrue(txValues.contains("value1"));
       {
@@ -4085,7 +4171,6 @@ public class TXJUnitTest {
       assertEquals(1, txValues.size());
       assertTrue(txValues.iterator().hasNext());
       assertEquals("txValue1", txValues.iterator().next());
-      // assertIndexDetailsEquals(0, nonTxValues.size());
       // non-TX collections can now be used in a transactional context
       try {
         nonTxValues.iterator().hasNext();
@@ -4104,18 +4189,23 @@ public class TXJUnitTest {
     }
   }
 
-  /**
-   * @param reg1
-   */
-  protected void checkSubRegionCollecection(Region reg1) {
-    AttributesFactory af = new AttributesFactory();
-    af.setScope(Scope.DISTRIBUTED_NO_ACK);
-    Region sub1 = this.region.createSubregion("collectionSub1", af.create());
-    af = new AttributesFactory();
-    Region sub2 = this.region.createSubregion("collectionSub2", af.create());
-    af = new AttributesFactory();
-    af.setScope(Scope.LOCAL);
-    Region sub2_1 = sub2.createSubregion("collectionSub2_1", af.create());
+  protected void checkSubRegionCollection(Region<String, String> reg1) {
+    AttributesFactory<String, String> attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+
+    Region<String, String> sub1 =
+        this.region.createSubregion("collectionSub1", attributesFactory.create());
+
+    attributesFactory = new AttributesFactory<>();
+
+    Region<String, String> sub2 =
+        this.region.createSubregion("collectionSub2", attributesFactory.create());
+
+    attributesFactory = new AttributesFactory<>();
+    attributesFactory.setScope(Scope.LOCAL);
+
+    Region<String, String> sub2_1 =
+        sub2.createSubregion("collectionSub2_1", attributesFactory.create());
 
     checkCollectionSize(0);
     try {
@@ -4304,7 +4394,6 @@ public class TXJUnitTest {
     checkCollectionSize(2, 3);
     sub1.destroyRegion();
     checkCollectionSize(2);
-    // this.txMgr.rollback();
 
     reg1.localDestroy("key1");
     reg1.localDestroy("key3");
@@ -4313,18 +4402,20 @@ public class TXJUnitTest {
 
   @Test
   public void testLoader() throws CacheException {
-    LocalRegion reg1 = (LocalRegion) this.region;
-    AttributesMutator mutator = reg1.getAttributesMutator();
-    mutator.setCacheLoader(new CacheLoader() {
+    AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
+    mutator.setCacheLoader(new CacheLoader<String, String>() {
       int count = 0;
 
-      public Object load(LoaderHelper helper) throws CacheLoaderException {
+      @Override
+      public String load(LoaderHelper helper) throws CacheLoaderException {
         count++;
         return "LV " + count;
       }
 
+      @Override
       public void close() {}
     });
+    LocalRegion reg1 = (LocalRegion) this.region;
     if (isPR())
       ((PartitionedRegion) reg1).setHaveCacheLoader();
     assertTrue(!reg1.containsKey("key1"));
@@ -4596,26 +4687,26 @@ public class TXJUnitTest {
     final CachePerfStats stats = this.cache.getCachePerfStats();
 
     class statsValidator {
-      long txSuccessLifeTime;
-      long txFailedLifeTime;
-      long txRollbackLifeTime;
-      int txCommits;
-      int txFailures;
-      int txRollbacks;
-      long txCommitTime;
-      long txFailureTime;
-      long txRollbackTime;
-      int txCommitChanges;
-      int txFailureChanges;
-      int txRollbackChanges;
-
-      CachePerfStats stats;
-
-      statsValidator(CachePerfStats stats) {
+      private long txSuccessLifeTime;
+      private long txFailedLifeTime;
+      private long txRollbackLifeTime;
+      private int txCommits;
+      private int txFailures;
+      private int txRollbacks;
+      private long txCommitTime;
+      private long txFailureTime;
+      private long txRollbackTime;
+      private int txCommitChanges;
+      private int txFailureChanges;
+      private int txRollbackChanges;
+
+      private CachePerfStats stats;
+
+      private statsValidator(CachePerfStats stats) {
         this.stats = stats;
       }
 
-      void reset() {
+      private void reset() {
         this.txSuccessLifeTime = this.stats.getTxSuccessLifeTime();
         this.txFailedLifeTime = this.stats.getTxFailedLifeTime();
         this.txRollbackLifeTime = this.stats.getTxRollbackLifeTime();
@@ -4630,55 +4721,55 @@ public class TXJUnitTest {
         this.txRollbackChanges = this.stats.getTxRollbackChanges();
       }
 
-      void setTxSuccessLifeTime(long txSuccessLifeTime) {
+      private void setTxSuccessLifeTime(long txSuccessLifeTime) {
         this.txSuccessLifeTime = txSuccessLifeTime;
       }
 
-      void setTxFailedLifeTime(long txFailedLifeTime) {
+      private void setTxFailedLifeTime(long txFailedLifeTime) {
         this.txFailedLifeTime = txFailedLifeTime;
       }
 
-      void setTxRollbackLifeTime(long txRollbackLifeTime) {
+      private void setTxRollbackLifeTime(long txRollbackLifeTime) {
         this.txRollbackLifeTime = txRollbackLifeTime;
       }
 
-      void setTxCommits(int txCommits) {
+      private void setTxCommits(int txCommits) {
         this.txCommits = txCommits;
       }
 
-      void setTxFailures(int txFailures) {
+      private void setTxFailures(int txFailures) {
         this.txFailures = txFailures;
       }
 
-      void setTxRollbacks(int txRollbacks) {
+      private void setTxRollbacks(int txRollbacks) {
         this.txRollbacks = txRollbacks;
       }
 
-      void setTxCommitTime(long txCommitTime) {
+      private void setTxCommitTime(long txCommitTime) {
         this.txCommitTime = txCommitTime;
       }
 
-      void setTxFailureTime(long txFailureTime) {
+      private void setTxFailureTime(long txFailureTime) {
         this.txFailureTime = txFailureTime;
       }
 
-      void setTxRollbackTime(long txRollbackTime) {
+      private void setTxRollbackTime(long txRollbackTime) {
         this.txRollbackTime = txRollbackTime;
       }
 
-      void setTxCommitChanges(int txCommitChanges) {
+      private void setTxCommitChanges(int txCommitChanges) {
         this.txCommitChanges = txCommitChanges;
       }
 
-      void setTxFailureChanges(int txFailureChanges) {
+      private void setTxFailureChanges(int txFailureChanges) {
         this.txFailureChanges = txFailureChanges;
       }
 
-      void setTxRollbackChanges(int txRollbackChanges) {
+      private void setTxRollbackChanges(int txRollbackChanges) {
         this.txRollbackChanges = txRollbackChanges;
       }
 
-      void assertValid() {
+      private void assertValid() {
         assertEquals(this.txRollbacks, this.stats.getTxRollbacks());
         assertEquals(this.txRollbackChanges, this.stats.getTxRollbackChanges());
         if (Boolean
@@ -5000,9 +5091,9 @@ public class TXJUnitTest {
   @Test
   public void testCheckNoTX() {
     {
-      AttributesFactory af = new AttributesFactory();
+      AttributesFactory<String, String> af = new AttributesFactory<>();
       af.setScope(Scope.GLOBAL);
-      Region gr = null;
+      Region<String, String> gr = null;
       try {
         gr = this.cache.createRegion("GLOBALTXTest", af.create());
       } catch (CacheException ex) {
@@ -5028,11 +5119,11 @@ public class TXJUnitTest {
     {
       DiskStoreFactory dsf = this.cache.createDiskStoreFactory();
       dsf.create("testCheckNoTX");
-      AttributesFactory af = new AttributesFactory();
+      AttributesFactory<String, String> af = new AttributesFactory<>();
       af.setScope(Scope.LOCAL);
       af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
       af.setDiskStoreName("testCheckNoTX");
-      Region dr = null;
+      Region<String, String> dr = null;
       try {
         dr = this.cache.createRegion("DiskTXTest", af.create());
       } catch (CacheException ex) {
@@ -5569,13 +5660,8 @@ public class TXJUnitTest {
       fail("expected CommitConflictException");
     } catch (TransactionException ex) {
     }
-    // this.region is now destroyed
-
   }
 
-  /**
-   * @param txMgrImpl
-   */
   protected void checkUserAttributeConflict(final CacheTransactionManager txMgrImpl) {
     { // now check entry user attribute conflict checking
       this.region.put("key1", "value0");
@@ -5685,7 +5771,6 @@ public class TXJUnitTest {
       assertEquals(0, te.getEvents().size());
       this.region.destroy("key1");
 
-
       // now make sure that multiple invalidates of same entry are a single change
       txRollbackChanges = stats.getTxRollbackChanges();
       this.region.create("key1", "value1");
@@ -5741,7 +5826,7 @@ public class TXJUnitTest {
     }
   }
 
-  final static void clearRegion(Region r) throws TimeoutException {
+  private static void clearRegion(Region r) throws TimeoutException {
     Iterator kI = r.keySet().iterator();
     try {
       while (kI.hasNext()) {
@@ -5752,13 +5837,13 @@ public class TXJUnitTest {
     }
   }
 
-  final static int LRUENTRY_NULL = 0;
-  final static int LRUENTRY_STRING = 1;
-  final static int LRUENTRY_INTEGER = 2;
-  final static int LRUENTRY_LONG = 3;
-  final static int LRUENTRY_DOUBLE = 4;
+  private final static int LRUENTRY_NULL = 0;
+  private final static int LRUENTRY_STRING = 1;
+  private final static int LRUENTRY_INTEGER = 2;
+  private final static int LRUENTRY_LONG = 3;
+  private final static int LRUENTRY_DOUBLE = 4;
 
-  final static void assertLRUEntries(Set entries, int size, String keyPrefix, int instanceId) {
+  private static void assertLRUEntries(Set entries, int size, String keyPrefix, int instanceId) {
     assertEquals(size, entries.size());
     Iterator entItr = entries.iterator();
     while (entItr.hasNext()) {
@@ -5790,11 +5875,11 @@ public class TXJUnitTest {
   @Test
   public void testEviction() throws CacheException {
     final int lruSize = 8;
-    AttributesFactory af = new AttributesFactory();
+    AttributesFactory<String, Object> af = new AttributesFactory<>();
     af.setEvictionAttributes(
         EvictionAttributes.createLRUEntryAttributes(lruSize, EvictionAction.LOCAL_DESTROY));
     af.setScope(Scope.LOCAL);
-    Region lruRegion = this.cache.createRegion(getUniqueName(), af.create());
+    Region<String, Object> lruRegion = this.cache.createRegion(getUniqueName(), af.create());
 
     // Non-TX LRU verification
     assertEquals(0, lruRegion.entrySet(false).size());
@@ -5849,11 +5934,10 @@ public class TXJUnitTest {
     }
     clearRegion(lruRegion);
 
-
     // TX/non-TX no conflict verification w/ invalid initial state
     // full+2, all committed entries have TX refs using a loader
     {
-      AttributesMutator mutator = lruRegion.getAttributesMutator();
+      AttributesMutator<String, Object> mutator = lruRegion.getAttributesMutator();
       mutator.setCacheLoader(new CacheLoader() {
         // int count = 0;
         public Object load(LoaderHelper helper) throws CacheLoaderException {
@@ -5898,7 +5982,6 @@ public class TXJUnitTest {
     }
     clearRegion(lruRegion);
 
-
     // TX/TX/non-TX no conflict verification w/ initial state full, TX
     // add lruLimit+4, existing committed have TX 2 refs, force non-TX
     // eviction, force TX eviction
@@ -5932,11 +6015,6 @@ public class TXJUnitTest {
       tx2 = txMgrImpl.internalSuspend();
 
       assertLRUEntries(lruRegion.entrySet(false), lruSize, "key", LRUENTRY_INTEGER);
-      // LocalRegion lrReg = (LocalRegion) lruRegion;
-      // LRUClockNode lruE = null;
-      // assertNotNull(lruE = (LRUClockNode) lrReg.basicGetEntry("key"+(numToPut-1)));
-      // assertIndexDetailsEquals(2, lruE.getRefCount());
-      // assertIndexDetailsEquals(lruSize, lruRegion.entrySet(false).size());
 
       // Force the Non-Tx "put" to remove each attempt since region is full
       // and all the committed entries are currently part of a TX
@@ -6067,21 +6145,25 @@ public class TXJUnitTest {
 
     javax.transaction.TransactionManager jtaTxMgr = this.cache.getJTATransactionManager();
     TransactionListener tl = new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         ++listenerAfterCommit;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         ++listenerAfterFailedCommit;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         ++listenerAfterRollback;
         te = event;
       }
 
+      @Override
       public void close() {
         ++listenerClose;
       }
@@ -6180,6 +6262,7 @@ public class TXJUnitTest {
       // a conflict
       final int signal[] = {0};
       Thread t = new Thread("non-TX conflict generator") {
+        @Override
         public void run() {
           try {
             region.put("syncKey4", "syncVal4");
@@ -6237,21 +6320,25 @@ public class TXJUnitTest {
       javax.transaction.HeuristicMixedException, javax.transaction.HeuristicRollbackException {
 
     TransactionListener tl = new TransactionListener() {
+      @Override
       public void afterCommit(TransactionEvent event) {
         ++listenerAfterCommit;
         te = event;
       }
 
+      @Override
       public void afterFailedCommit(TransactionEvent event) {
         ++listenerAfterFailedCommit;
         te = event;
       }
 
+      @Override
       public void afterRollback(TransactionEvent event) {
         ++listenerAfterRollback;
         te = event;
       }
 
+      @Override
       public void close() {
         ++listenerClose;
       }
@@ -6270,8 +6357,7 @@ public class TXJUnitTest {
       fail("Expected to get a healthy UserTransaction!");
     }
 
-
-    // Test enlistement for put
+    // Test enlistment for put
     // Test enlisted rollback
     // Test prevention of rollback/commit for enlisted transaction
     assertEquals(0, this.listenerAfterRollback);
@@ -6301,7 +6387,7 @@ public class TXJUnitTest {
     assertTrue(!this.region.containsKey("enlistKey"));
     assertEquals(1, this.listenerAfterRollback);
 
-    // Test enlistement for create
+    // Test enlistment for create
     // Test commit
     assertEquals(0, this.listenerAfterCommit);
     userTx.begin();
@@ -6314,7 +6400,7 @@ public class TXJUnitTest {
     assertEquals("enlistVal", this.region.getEntry("enlistKey").getValue());
     assertEquals(1, this.listenerAfterCommit);
 
-    // Test enlistement for get
+    // Test enlistment for get
     assertEquals(1, this.listenerAfterCommit);
     userTx.begin();
     assertEquals("enlistVal", this.region.get("enlistKey"));
@@ -6323,7 +6409,7 @@ public class TXJUnitTest {
     assertNull(this.txMgr.getTransactionId());
     assertEquals(2, this.listenerAfterCommit);
 
-    // Test enlistement for invalidate
+    // Test enlistment for invalidate
     assertEquals(2, this.listenerAfterCommit);
     userTx.begin();
     this.region.invalidate("enlistKey");
@@ -6336,7 +6422,7 @@ public class TXJUnitTest {
     assertTrue(!this.region.containsValueForKey("enlistKey"));
     assertEquals(3, this.listenerAfterCommit);
 
-    // Test enlistement for destroy
+    // Test enlistment for destroy
     assertEquals(3, this.listenerAfterCommit);
     userTx.begin();
     this.region.destroy("enlistKey");
@@ -6348,24 +6434,26 @@ public class TXJUnitTest {
     assertEquals(4, this.listenerAfterCommit);
 
     // Test enlistment for load
-    AttributesMutator mutator = this.region.getAttributesMutator();
-    mutator.setCacheLoader(new CacheLoader() {
+    AttributesMutator<String, String> mutator = this.region.getAttributesMutator();
+    mutator.setCacheLoader(new CacheLoader<String, String>() {
       int count = 0;
 
-      public Object load(LoaderHelper helper) throws CacheLoaderException {
-        return new Integer(count++);
+      @Override
+      public String load(LoaderHelper helper) throws CacheLoaderException {
+        return String.valueOf(count++);
       }
 
+      @Override
       public void close() {}
     });
     assertEquals(4, this.listenerAfterCommit);
     userTx.begin();
-    assertEquals(new Integer(0), this.region.get("enlistKey"));
+    assertEquals("0", this.region.get("enlistKey"));
     assertNotNull(this.txMgr.getTransactionId());
     userTx.commit();
     assertNull(this.txMgr.getTransactionId());
     assertTrue(this.region.containsKey("enlistKey"));
-    assertEquals(new Integer(0), this.region.getEntry("enlistKey").getValue());
+    assertEquals("0", this.region.getEntry("enlistKey").getValue());
     assertEquals(5, this.listenerAfterCommit);
     mutator.setCacheLoader(null);
 


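Note on the pattern repeated through the hunks above: the raw Collection returned by TransactionEvent.getPutEvents()/getCreateEvents()/getDestroyEvents()/getInvalidateEvents() is used as a typed List<EntryEvent<?, ?>>, which lets the explicit Iterator and the cast to EntryEvent drop out in favor of an enhanced for loop. A minimal standalone sketch of that before/after shape follows; FakeEntryEvent is a hypothetical stand-in for org.apache.geode.cache.EntryEvent so the snippet compiles on its own, and the key value is purely illustrative.

import java.util.ArrayList;
import java.util.List;

public class TypedEventLoopSketch {

  /** Stand-in for org.apache.geode.cache.EntryEvent; only getKey() is needed here. */
  static class FakeEntryEvent {
    private final String key;

    FakeEntryEvent(String key) {
      this.key = key;
    }

    String getKey() {
      return key;
    }
  }

  public static void main(String[] args) {
    // Before: Collection events = te.getPutEvents();
    //         Iterator it = events.iterator();
    //         while (it.hasNext()) {
    //           EntryEvent ev = (EntryEvent) it.next();  // cast required
    //           ...
    //         }
    //
    // After: the getter is treated as a typed List, so the cast and the explicit
    // Iterator disappear and an enhanced for loop does the same work.
    List<FakeEntryEvent> events = new ArrayList<>();
    events.add(new FakeEntryEvent("key1"));

    for (FakeEntryEvent ev : events) {
      System.out.println("key = " + ev.getKey()); // prints: key = key1
    }
  }
}
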
[5/6] geode git commit: Risky refactorings

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
index 9bd20a6..83f1da3 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
@@ -14,13 +14,31 @@
  */
 package org.apache.geode.cache.query.dunit;
 
-import static org.junit.Assert.fail;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.test.dunit.IgnoredException.*;
+import static org.apache.geode.test.dunit.Invoke.*;
+import static org.apache.geode.test.dunit.LogWriterUtils.*;
+import static org.assertj.core.api.Assertions.*;
+import static org.awaitility.Awaitility.*;
+
+import java.io.File;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.LogWriter;
 import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheExistsException;
-import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.query.Index;
 import org.apache.geode.cache.query.Query;
@@ -33,95 +51,78 @@ import org.apache.geode.cache.query.internal.QueryObserverHolder;
 import org.apache.geode.cache.query.internal.index.IndexManager;
 import org.apache.geode.cache.query.internal.index.PartitionedIndex;
 import org.apache.geode.cache30.CacheSerializableRunnable;
-import org.apache.geode.distributed.internal.DistributionConfig;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.DistributedTestUtils;
 import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
-import org.apache.geode.test.dunit.SerializableRunnable;
-import org.apache.geode.test.dunit.ThreadUtils;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.util.test.TestUtil;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Properties;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 
 @Category(DistributedTest.class)
 public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
 
-  static private final String WAIT_PROPERTY = "QueryIndexBuckets.maxWaitTime";
-
-  static private final int WAIT_DEFAULT = (60 * 1000);
-
-  public static final long MAX_TIME = Integer.getInteger(WAIT_PROPERTY, WAIT_DEFAULT);
-
-  final String name = "PartionedPortfolios";
-  final String repRegName = "Portfolios";
-  final String persistentRegName = "PersistentPrPortfolios";
-  final String nameWithRange = "PartitionedPortfoliosWithRange";
-  final String nameWithHash = "PartionedPortfoliosWithHash";
-  final String repRegNameWithRange = "PortfoliosWithRange";
-  final String repRegNameWithHash = "PortfoliosWithHash";
-  final String persistentRegNameWithRange = "PersistentPrPortfoliosWithRange";
-  final String persistentRegNameWithHash = "PersistentPrPortfoliosWithHash";
-  final String noIndexRepReg = "PortfoliosNoIndex";
-  final String statusIndex = "statusIndex";
-  final String idIndex = "idIndex";
-
-  String queryStr[][] = new String[][] {
-      {"Select * from /" + name + " where ID > 10",
-          "Select * from /" + repRegName + " where ID > 10",
-          "Select * from /" + persistentRegName + " where ID > 10",},
-      {"Select * from /" + name + " where ID = 5", "Select * from /" + repRegName + " where ID = 5",
-          "Select * from /" + persistentRegName + " where ID = 5",
-          "Select * from /" + nameWithHash + " where ID = 5",
-          "Select * from /" + repRegNameWithHash + " where ID = 5",
-          "Select * from /" + persistentRegNameWithHash + " where ID = 5"},
-      {"Select * from /" + name + " where status = 'active'",
-          "Select * from /" + repRegName + " where status = 'active'",
-          "Select * from /" + persistentRegName + " where status = 'active'",
-          "Select * from /" + nameWithHash + " where status = 'active'",
-          "Select * from /" + repRegNameWithHash + " where status = 'active'",
-          "Select * from /" + persistentRegNameWithHash + " where status = 'active'",},};
-
-  String queryStrNoIndex[] = new String[] {"Select * from /" + noIndexRepReg + " where ID > 10",
-      "Select * from /" + noIndexRepReg + " where ID = 5",
-      "Select * from /" + noIndexRepReg + " where status = 'active'",};
-
-  String queryStrValid = "Select * from /" + noIndexRepReg + " where ID > 10";
-
-  private String persistentOverFlowRegName = "PersistentOverflowPortfolios";
-
-  @Override
-  public final void postSetUp() throws Exception {
-    // Workaround for #52008
-    IgnoredException.addIgnoredException("Failed to create index");
+  private static final String NAME = "PartitionedPortfolios";
+  private static final String REP_REG_NAME = "Portfolios";
+  private static final String PERSISTENT_REG_NAME = "PersistentPrPortfolios";
+  private static final String NAME_WITH_RANGE = "PartitionedPortfoliosWithRange";
+  private static final String NAME_WITH_HASH = "PartitionedPortfoliosWithHash";
+  private static final String REP_REG_NAME_WITH_RANGE = "PortfoliosWithRange";
+  private static final String REP_REG_NAME_WITH_HASH = "PortfoliosWithHash";
+  private static final String PERSISTENT_REG_NAME_WITH_RANGE = "PersistentPrPortfoliosWithRange";
+  private static final String PERSISTENT_REG_NAME_WITH_HASH = "PersistentPrPortfoliosWithHash";
+  private static final String NO_INDEX_REP_REG = "PortfoliosNoIndex";
+  private static final String STATUS_INDEX = "statusIndex";
+  private static final String ID_INDEX = "idIndex";
+
+  private static final String[][] QUERY_STR = new String[][] {
+      {"Select * from /" + NAME + " where ID > 10",
+          "Select * from /" + REP_REG_NAME + " where ID > 10",
+          "Select * from /" + PERSISTENT_REG_NAME + " where ID > 10",},
+      {"Select * from /" + NAME + " where ID = 5",
+          "Select * from /" + REP_REG_NAME + " where ID = 5",
+          "Select * from /" + PERSISTENT_REG_NAME + " where ID = 5",
+          "Select * from /" + NAME_WITH_HASH + " where ID = 5",
+          "Select * from /" + REP_REG_NAME_WITH_HASH + " where ID = 5",
+          "Select * from /" + PERSISTENT_REG_NAME_WITH_HASH + " where ID = 5"},
+      {"Select * from /" + NAME + " where status = 'active'",
+          "Select * from /" + REP_REG_NAME + " where status = 'active'",
+          "Select * from /" + PERSISTENT_REG_NAME + " where status = 'active'",
+          "Select * from /" + NAME_WITH_HASH + " where status = 'active'",
+          "Select * from /" + REP_REG_NAME_WITH_HASH + " where status = 'active'",
+          "Select * from /" + PERSISTENT_REG_NAME_WITH_HASH + " where status = 'active'"}};
+
+  private static final String[] QUERY_STR_NO_INDEX =
+      new String[] {"Select * from /" + NO_INDEX_REP_REG + " where ID > 10",
+          "Select * from /" + NO_INDEX_REP_REG + " where ID = 5",
+          "Select * from /" + NO_INDEX_REP_REG + " where status = 'active'"};
+
+  private static final String PERSISTENT_OVER_FLOW_REG_NAME = "PersistentOverflowPortfolios";
+
+  private static final String CACHE_XML_FILE_NAME = "IndexCreation.xml";
+
+  private File cacheXmlFile;
+
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+
+  @Before
+  public void before() throws Exception {
+    addIgnoredException("Failed to create index");
+
+    URL url = getClass().getResource(CACHE_XML_FILE_NAME);
+    assertThat(url).isNotNull(); // precondition
+
+    this.cacheXmlFile = this.temporaryFolder.newFile(CACHE_XML_FILE_NAME);
+    FileUtils.copyURLToFile(url, this.cacheXmlFile);
+    assertThat(this.cacheXmlFile).exists(); // precondition
   }
 
-  @Override
-  public final void postTearDownCacheTestCase() throws Exception {
-    // avoid creating a new cache just to get the diskstore name
-    Invoke.invokeInEveryVM(resetTestHook());
+  @After
+  public void after() throws Exception {
+    invokeInEveryVM(resetTestHook());
     disconnectFromDS();
-    File deleteMe = new File(GemFireCacheImpl.DEFAULT_DS_NAME).getAbsoluteFile();
-    if (deleteMe.exists())
-      FileUtils.forceDelete(deleteMe);
   }
 
   /**
@@ -129,68 +130,53 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateIndexThroughXML() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
 
-    AsyncInvocation asyInvk0 =
-        vm0.invokeAsync(createIndexThrougXML("vm0testCreateIndexThroughXML", name, fileName));
+    AsyncInvocation async0 = vm0.invokeAsync(createIndexThroughXML(NAME));
+    AsyncInvocation async1 = vm1.invokeAsync(createIndexThroughXML(NAME));
 
-    AsyncInvocation asyInvk1 =
-        vm1.invokeAsync(createIndexThrougXML("vm1testCreateIndexThroughXML", name, fileName));
-
-    ThreadUtils.join(asyInvk1, 30 * 1000);
-    if (asyInvk1.exceptionOccurred()) {
-      Assert.fail("asyInvk1 failed", asyInvk1.getException());
-    }
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    async1.await();
+    async0.await();
 
     // Check index for PR
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(name, idIndex, -1));
-    vm1.invoke(prIndexCreationCheck(name, idIndex, -1));
-    vm0.invoke(prIndexCreationCheck(name, "secIndex", -1));
-    vm1.invoke(prIndexCreationCheck(name, "secIndex", -1));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME, ID_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME, ID_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME, "secIndex", -1));
+    vm1.invoke(prIndexCreationCheck(NAME, "secIndex", -1));
 
     // Check index for replicated
-    vm0.invoke(indexCreationCheck(repRegName, statusIndex));
-    vm1.invoke(indexCreationCheck(repRegName, statusIndex));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, STATUS_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, STATUS_INDEX));
 
     // Check index for persistent pr region
-    vm0.invoke(prIndexCreationCheck(persistentRegName, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(persistentRegName, statusIndex, -1));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, -1));
 
     // check range index creation
-    vm0.invoke(prIndexCreationCheck(nameWithRange, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(nameWithRange, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(nameWithRange, idIndex, -1));
-    vm1.invoke(prIndexCreationCheck(nameWithRange, idIndex, -1));
-    vm0.invoke(indexCreationCheck(repRegNameWithRange, statusIndex));
-    vm1.invoke(indexCreationCheck(repRegNameWithRange, statusIndex));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithRange, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithRange, statusIndex, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_RANGE, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_RANGE, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_RANGE, ID_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_RANGE, ID_INDEX, -1));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_RANGE, STATUS_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_RANGE, STATUS_INDEX));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_RANGE, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_RANGE, STATUS_INDEX, -1));
 
     // check hash index creation
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, idIndex, -1));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, idIndex, -1));
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, statusIndex));
-    vm1.invoke(indexCreationCheck(repRegNameWithHash, statusIndex));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithHash, statusIndex, -1));
-
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, -1));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, STATUS_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, STATUS_INDEX));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, STATUS_INDEX, -1));
   }
 
   /**
@@ -198,53 +184,47 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateIndexWhileDoingGII() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
+    vm0.invoke(createIndexThroughXML(NAME));
 
-    vm0.invoke(createIndexThrougXML("vm0testCreateIndexWhileDoingGII", name, fileName));
     // LoadRegion
-    vm0.invoke(loadRegion(name));
-    vm0.invoke(loadRegion(nameWithHash));
-    vm0.invoke(loadRegion(nameWithRange));
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(nameWithRange, statusIndex, -1));
+    vm0.invoke(loadRegion(NAME));
+    vm0.invoke(loadRegion(NAME_WITH_HASH));
+    vm0.invoke(loadRegion(NAME_WITH_RANGE));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_RANGE, STATUS_INDEX, -1));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testCreateIndexWhileDoingGII", name, fileName));
+    vm1.invoke(createIndexThroughXML(NAME));
 
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(name, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, idIndex, 50));
-    vm0.invoke(prIndexCreationCheck(name, "secIndex", 50));
-    vm1.invoke(prIndexCreationCheck(name, "secIndex", 50));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, ID_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME, "secIndex", 50));
+    vm1.invoke(prIndexCreationCheck(NAME, "secIndex", 50));
 
     // check range index creation
-    vm0.invoke(prIndexCreationCheck(nameWithRange, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithRange, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(nameWithRange, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithRange, idIndex, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_RANGE, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_RANGE, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_RANGE, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_RANGE, ID_INDEX, 50));
 
     // check hash index creation
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, idIndex, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, 50));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQuery(name));
-    vm1.invoke(executeQuery(name));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(executeQuery(NAME));
+    vm1.invoke(executeQuery(NAME));
   }
 
   /**
@@ -252,50 +232,42 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testReplicatedRegionCreateIndexWhileDoingGII() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
+    vm0.invoke(createIndexThroughXML(REP_REG_NAME));
 
-    vm0.invoke(
-        createIndexThrougXML("vm0testRRegionCreateIndexWhileDoingGII", repRegName, fileName));
     // LoadRegion
-    vm0.invoke(loadRegion(repRegName));
-    vm0.invoke(loadRegion(repRegNameWithHash));
-    vm0.invoke(loadRegion(noIndexRepReg));
-    vm0.invoke(indexCreationCheck(repRegName, statusIndex));
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, statusIndex));
+    vm0.invoke(loadRegion(REP_REG_NAME));
+    vm0.invoke(loadRegion(REP_REG_NAME_WITH_HASH));
+    vm0.invoke(loadRegion(NO_INDEX_REP_REG));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, STATUS_INDEX));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, STATUS_INDEX));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(
-        createIndexThrougXML("vm1testRRegionCreateIndexWhileDoingGII", repRegName, fileName));
+    vm1.invoke(createIndexThroughXML(REP_REG_NAME));
 
-    vm0.invoke(indexCreationCheck(repRegName, statusIndex));
-    vm1.invoke(indexCreationCheck(repRegName, statusIndex));
-    vm0.invoke(indexCreationCheck(repRegName, idIndex));
-    vm1.invoke(indexCreationCheck(repRegName, idIndex));
-    vm0.invoke(indexCreationCheck(repRegName, "secIndex"));
-    vm1.invoke(indexCreationCheck(repRegName, "secIndex"));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, STATUS_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, STATUS_INDEX));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, ID_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, ID_INDEX));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, "secIndex"));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, "secIndex"));
 
     // check hash index creation
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, statusIndex));
-    vm1.invoke(indexCreationCheck(repRegNameWithHash, statusIndex));
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, idIndex));
-    vm1.invoke(indexCreationCheck(repRegNameWithHash, idIndex));
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, "secIndex"));
-    vm1.invoke(indexCreationCheck(repRegNameWithHash, "secIndex"));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, STATUS_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, STATUS_INDEX));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, ID_INDEX));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, ID_INDEX));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, "secIndex"));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, "secIndex"));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQuery(repRegName));
-    vm1.invoke(executeQuery(repRegName));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(executeQuery(REP_REG_NAME));
+    vm1.invoke(executeQuery(REP_REG_NAME));
   }
 
   /**
@@ -303,63 +275,51 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testPersistentPRRegionCreateIndexWhileDoingGII() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
+    vm0.invoke(createIndexThroughXML(PERSISTENT_REG_NAME));
 
-    vm0.invoke(createIndexThrougXML("vm0testPersistentPRRegion", persistentRegName, fileName));
     // LoadRegion
-    vm0.invoke(loadRegion(this.persistentRegName));
-    vm0.invoke(loadRegion(noIndexRepReg));
-    vm0.invoke(loadRegion(persistentRegNameWithHash));
-    vm0.invoke(prIndexCreationCheck(persistentRegName, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, statusIndex, -1));
+    vm0.invoke(loadRegion(PERSISTENT_REG_NAME));
+    vm0.invoke(loadRegion(NO_INDEX_REP_REG));
+    vm0.invoke(loadRegion(PERSISTENT_REG_NAME_WITH_HASH));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, STATUS_INDEX, -1));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testPersistentPRRegion", persistentRegName, fileName));
+    vm1.invoke(createIndexThroughXML(PERSISTENT_REG_NAME));
 
-    vm0.invoke(prIndexCreationCheck(persistentRegName, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegName, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(persistentRegName, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegName, idIndex, 50));
-    vm0.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, ID_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, "secIndex", 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, "secIndex", 50));
 
     // check hash index creation
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithHash, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithHash, idIndex, 50));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, "secIndex", 50));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithHash, "secIndex", 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, ID_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, "secIndex", 50));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, "secIndex", 50));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQuery(persistentRegName));
-    vm1.invoke(executeQuery(persistentRegName));
+    vm0.invoke(executeQuery(PERSISTENT_REG_NAME));
+    vm1.invoke(executeQuery(PERSISTENT_REG_NAME));
 
     // close one vm cache
     vm1.invoke(resetTestHook());
-    vm1.invoke(new SerializableRunnable() {
-
-      @Override
-      public void run() {
-        closeCache();
-      }
-    });
+    vm1.invoke(() -> closeCache());
 
     // restart
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testPersistentPRRegion", persistentRegName, fileName));
-    vm1.invoke(prIndexCreationCheck(persistentRegName, statusIndex, 50));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm1.invoke(createIndexThroughXML(PERSISTENT_REG_NAME));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, STATUS_INDEX, 50));
   }
 
   /**
@@ -367,37 +327,29 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateIndexWhileDoingGIIWithEmptyPRRegion() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
-
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("### in testCreateIndexWhileDoingGIIWithEmptyPRRegion.");
 
+    getLogWriter().info("### in testCreateIndexWhileDoingGIIWithEmptyPRRegion.");
 
-    vm0.invoke(createIndexThrougXML("vm0testGIIWithEmptyPRRegion", name, fileName));
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, -1));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, -1));
+    vm0.invoke(createIndexThroughXML(NAME));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, -1));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testGIIWithEmptyPRRegion", name, fileName));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, -1));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, statusIndex, -1));
+    vm1.invoke(createIndexThroughXML(NAME));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, -1));
 
     // LoadRegion
-    vm0.invoke(loadRegion(name));
-    vm0.invoke(loadRegion(nameWithHash));
+    vm0.invoke(loadRegion(NAME));
+    vm0.invoke(loadRegion(NAME_WITH_HASH));
 
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
   }
 
   /**
@@ -405,47 +357,30 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateAsyncIndexWhileDoingGII() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
 
-    AsyncInvocation asyInvk0 =
-        vm0.invokeAsync(createIndexThrougXML("vm0testAsyncIndexWhileDoingGII", name, fileName));
+    AsyncInvocation async0 = vm0.invokeAsync(createIndexThroughXML(NAME));
 
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    async0.await();
 
     // LoadRegion
-    asyInvk0 = vm0.invokeAsync(loadRegion(name));
+    async0 = vm0.invokeAsync(loadRegion(NAME));
 
     vm1.invoke(setTestHook());
-    AsyncInvocation asyInvk1 =
-        vm1.invokeAsync(createIndexThrougXML("vm1testAsyncIndexWhileDoingGII", name, fileName));
 
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
+    AsyncInvocation async1 = vm1.invokeAsync(createIndexThroughXML(NAME));
 
-    ThreadUtils.join(asyInvk1, 30 * 1000);
-    if (asyInvk1.exceptionOccurred()) {
-      Assert.fail("asyInvk1 failed", asyInvk1.getException());
-    }
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
 
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
+    async1.await();
 
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
 
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    async0.await();
   }
 
   /**
@@ -453,60 +388,54 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateIndexWhileDoingGIIAndCompareQueryResults() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
+    vm0.invoke(createIndexThroughXML(NAME));
 
-    vm0.invoke(createIndexThrougXML("vm0testIndexCompareQResults", name, fileName));
     // LoadRegion
-    vm0.invoke(loadRegion(name));
-    vm0.invoke(loadRegion(repRegName));
-    vm0.invoke(loadRegion(persistentRegName));
-    vm0.invoke(loadRegion(noIndexRepReg));
-    vm0.invoke(loadRegion(nameWithHash));
-    vm0.invoke(loadRegion(repRegNameWithHash));
-    vm0.invoke(loadRegion(persistentRegNameWithHash));
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, -1));
+    vm0.invoke(loadRegion(NAME));
+    vm0.invoke(loadRegion(REP_REG_NAME));
+    vm0.invoke(loadRegion(PERSISTENT_REG_NAME));
+    vm0.invoke(loadRegion(NO_INDEX_REP_REG));
+    vm0.invoke(loadRegion(NAME_WITH_HASH));
+    vm0.invoke(loadRegion(REP_REG_NAME_WITH_HASH));
+    vm0.invoke(loadRegion(PERSISTENT_REG_NAME_WITH_HASH));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, -1));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testIndexCompareQResults", name, fileName));
-
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(name, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, idIndex, 50));
-    vm0.invoke(prIndexCreationCheck(name, "secIndex", 50));
-    vm1.invoke(prIndexCreationCheck(name, "secIndex", 50));
-
-    vm0.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, statusIndex, 50));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, idIndex, 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, idIndex, 50));
-    vm0.invoke(prIndexCreationCheck(nameWithHash, "secIndex", 50));
-    vm1.invoke(prIndexCreationCheck(nameWithHash, "secIndex", 50));
-
-    vm0.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
-    vm0.invoke(indexCreationCheck(repRegName, "secIndex"));
-    vm0.invoke(prIndexCreationCheck(persistentRegNameWithHash, "secIndex", 50));
-    vm0.invoke(indexCreationCheck(repRegNameWithHash, "secIndex"));
-
-    vm1.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
-    vm1.invoke(indexCreationCheck(repRegName, "secIndex"));
-    vm1.invoke(prIndexCreationCheck(persistentRegNameWithHash, "secIndex", 50));
-    vm1.invoke(indexCreationCheck(repRegNameWithHash, "secIndex"));
+    vm1.invoke(createIndexThroughXML(NAME));
+
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, ID_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME, "secIndex", 50));
+    vm1.invoke(prIndexCreationCheck(NAME, "secIndex", 50));
+
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, STATUS_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, ID_INDEX, 50));
+    vm0.invoke(prIndexCreationCheck(NAME_WITH_HASH, "secIndex", 50));
+    vm1.invoke(prIndexCreationCheck(NAME_WITH_HASH, "secIndex", 50));
+
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, "secIndex", 50));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME, "secIndex"));
+    vm0.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, "secIndex", 50));
+    vm0.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, "secIndex"));
+
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, "secIndex", 50));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, "secIndex"));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME_WITH_HASH, "secIndex", 50));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME_WITH_HASH, "secIndex"));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQueryAndCompareResult(name, true));
-    vm1.invoke(executeQueryAndCompareResult(name, true));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(executeQueryAndCompareResult(true));
+    vm1.invoke(executeQueryAndCompareResult(true));
   }
 
   /**
@@ -514,439 +443,303 @@ public class QueryIndexUsingXMLDUnitTest extends JUnit4CacheTestCase {
    */
   @Test
   public void testCreateAsyncIndexWhileDoingGIIAndQuery() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
 
-    AsyncInvocation asyInvk0 =
-        vm0.invokeAsync(createIndexThrougXML("vm0testCreateAsyncIndexGIIAndQuery", name, fileName));
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    AsyncInvocation async0 = vm0.invokeAsync(createIndexThroughXML(NAME));
+
+    async0.await();
 
     // LoadRegion
-    asyInvk0 = vm0.invokeAsync(loadRegion(name));
+    async0 = vm0.invokeAsync(loadRegion(NAME));
 
     vm1.invoke(setTestHook());
-    AsyncInvocation asyInvk1 =
-        vm1.invokeAsync(createIndexThrougXML("vm1testCreateAsyncIndexGIIAndQuery", name, fileName));
 
+    AsyncInvocation async1 = vm1.invokeAsync(createIndexThroughXML(NAME));
 
-    ThreadUtils.join(asyInvk1, 30 * 1000);
-    if (asyInvk1.exceptionOccurred()) {
-      Assert.fail("asyInvk1 failed", asyInvk1.getException());
-    }
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    async1.await();
+    async0.await();
 
-    vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
-    vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
+    vm0.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
+    vm1.invoke(prIndexCreationCheck(NAME, STATUS_INDEX, 50));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQuery(name));
-    vm1.invoke(executeQuery(name));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(executeQuery(NAME));
+    vm1.invoke(executeQuery(NAME));
   }
 
   /**
-   * Creates asynch indexes and compares the results between index and non-index results.
+   * Creates async indexes and compares the results between index and non-index results.
    * <p>
-   * DISABLED. This test is disabled due to a high rate of failure. See ticket #52167
+   * DISABLED. This test is disabled due to a high rate of failure. See ticket
+   * #52167
    */
   @Ignore("TODO: test is disabled because of #52167")
   @Test
   public void testCreateAsyncIndexWhileDoingGIIAndCompareQueryResults() throws Exception {
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
+    vm0.invoke(createIndexThroughXML(NAME));
 
-    vm0.invoke(createIndexThrougXML("vm0testAsyncIndexAndCompareQResults", name, fileName));
     // LoadRegion
-    vm0.invoke(loadRegion(name));
-    vm0.invoke(loadRegion(repRegName));
-    vm0.invoke(loadRegion(persistentRegName));
-    vm0.invoke(loadRegion(noIndexRepReg));
+    vm0.invoke(loadRegion(NAME));
+    vm0.invoke(loadRegion(REP_REG_NAME));
+    vm0.invoke(loadRegion(PERSISTENT_REG_NAME));
+    vm0.invoke(loadRegion(NO_INDEX_REP_REG));
 
     // Start async update
-    vm0.invokeAsync(loadRegion(name, 500));
-    vm0.invokeAsync(loadRegion(repRegName, 500));
-    AsyncInvocation asyInvk0 = vm0.invokeAsync(loadRegion(persistentRegName, 500));
-    vm0.invokeAsync(loadRegion(noIndexRepReg, 500));
+    vm0.invokeAsync(loadRegion(NAME, 500));
+    vm0.invokeAsync(loadRegion(REP_REG_NAME, 500));
+
+    AsyncInvocation async0 = vm0.invokeAsync(loadRegion(PERSISTENT_REG_NAME, 500));
+
+    vm0.invokeAsync(loadRegion(NO_INDEX_REP_REG, 500));
 
     vm1.invoke(setTestHook());
-    vm1.invoke(createIndexThrougXML("vm1testAsyncIndexAndCompareQResults", name, fileName));
+    vm1.invoke(createIndexThroughXML(NAME));
 
-    ThreadUtils.join(asyInvk0, 30 * 1000);
-    if (asyInvk0.exceptionOccurred()) {
-      Assert.fail("asyInvk0 failed", asyInvk0.getException());
-    }
+    async0.await();
 
-    vm1.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
-    vm1.invoke(indexCreationCheck(repRegName, "secIndex"));
+    vm1.invoke(prIndexCreationCheck(PERSISTENT_REG_NAME, "secIndex", 50));
+    vm1.invoke(indexCreationCheck(REP_REG_NAME, "secIndex"));
 
     // Execute query and verify index usage
-    vm0.invoke(executeQueryAndCompareResult(name, false));
-    vm1.invoke(executeQueryAndCompareResult(name, false));
-
-    vm1.invoke(resetTestHook());
-    vm0.invoke(close());
-    vm1.invoke(close());
+    vm0.invoke(executeQueryAndCompareResult(false));
+    vm1.invoke(executeQueryAndCompareResult(false));
   }
 
   @Test
   public void testIndexCreationForReplicatedPersistentOverFlowRegionOnRestart() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    final String fileName = "IndexCreation.xml";
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-        .info("Creating index using an xml file name : " + fileName);
+    getLogWriter().info("Creating index using an xml file name : " + CACHE_XML_FILE_NAME);
+
     // create index using xml
-    vm0.invoke(
-        createIndexThrougXML("vm0testIndexCreationForReplicatedPersistentOverFlowRegionOnRestart",
-            persistentOverFlowRegName, fileName));
+    vm0.invoke(createIndexThroughXML(PERSISTENT_OVER_FLOW_REG_NAME));
     // verify index creation
-    vm0.invoke(indexCreationCheck(persistentOverFlowRegName, statusIndex));
+    vm0.invoke(indexCreationCheck(PERSISTENT_OVER_FLOW_REG_NAME, STATUS_INDEX));
     // LoadRegion
-    vm0.invoke(loadRegion(persistentOverFlowRegName));
+    vm0.invoke(loadRegion(PERSISTENT_OVER_FLOW_REG_NAME));
     // close cache without deleting diskstore
     vm0.invoke(closeWithoutDeletingDiskStore());
     // start cache by recovering data from diskstore
-    vm0.invoke(
-        createIndexThrougXML("vm0testIndexCreationForReplicatedPersistentOverFlowRegionOnRestart",
-            persistentOverFlowRegName, fileName));
+    vm0.invoke(createIndexThroughXML(PERSISTENT_OVER_FLOW_REG_NAME));
     // verify index creation on restart
-    vm0.invoke(indexCreationCheck(persistentOverFlowRegName, statusIndex));
-    // close cache and delete diskstore
-    vm0.invoke(close());
-
+    vm0.invoke(indexCreationCheck(PERSISTENT_OVER_FLOW_REG_NAME, STATUS_INDEX));
   }
 
-  public CacheSerializableRunnable setTestHook() {
-    SerializableRunnable sr = new CacheSerializableRunnable("TestHook") {
+  private CacheSerializableRunnable setTestHook() {
+    return new CacheSerializableRunnable("TestHook") {
+      @Override
       public void run2() {
         class IndexTestHook implements IndexManager.TestHook {
-          public boolean indexCreatedAsPartOfGII;
-
-          public void hook(int spot) throws RuntimeException {
-            GemFireCacheImpl.getInstance().getLogger()
-                .fine("In IndexTestHook.hook(). hook() argument value is : " + spot);
+          @Override
+          public void hook(int spot) {
+            getLogWriter().fine("In IndexTestHook.hook(). hook() argument value is : " + spot);
             if (spot == 1) {
               throw new RuntimeException("Index is not created as part of Region GII.");
             }
           }
-        };
+        }
         IndexManager.testHook = new IndexTestHook();
       }
     };
-    return (CacheSerializableRunnable) sr;
   }
 
-
-  public CacheSerializableRunnable resetTestHook() {
-    SerializableRunnable sr = new CacheSerializableRunnable("TestHook") {
+  private CacheSerializableRunnable resetTestHook() {
+    return new CacheSerializableRunnable("TestHook") {
+      @Override
       public void run2() {
         IndexManager.testHook = null;
       }
     };
-    return (CacheSerializableRunnable) sr;
   }
 
-  public CacheSerializableRunnable createIndexThrougXML(final String vmid, final String regionName,
-      final String xmlFileName) {
-    SerializableRunnable sr = new CacheSerializableRunnable("RegionCreator") {
+  private CacheSerializableRunnable createIndexThroughXML(final String regionName) {
+    return new CacheSerializableRunnable("RegionCreator") {
+      @Override
       public void run2() {
-        try {
-          // closeCache();
-          File file = findFile(xmlFileName);
-          GemFireCacheImpl.testCacheXml = file;
-          // DistributedTestCase.diskStore = vmid;
-          getSystem();
-          Cache cache = getCache();
-          Region region = cache.getRegion(regionName);
-          if (region == null) {
-            fail("Region not found." + regionName);
-          }
-        } finally {
-          GemFireCacheImpl.testCacheXml = null;
-          // DistributedTestCase.diskStore = null;
-        }
+        Properties properties = new Properties();
+        properties.setProperty(CACHE_XML_FILE, cacheXmlFile.getAbsolutePath());
+        getSystem(properties);
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
+
+        assertThat(region).isNotNull();
       }
     };
-    return (CacheSerializableRunnable) sr;
   }
 
-  public CacheSerializableRunnable prIndexCreationCheck(final String regionName,
+  private CacheSerializableRunnable prIndexCreationCheck(final String regionName,
       final String indexName, final int bucketCount) {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable(
-        "pr IndexCreationCheck" + regionName + " indexName :" + indexName) {
+    return new CacheSerializableRunnable(
+        "pr IndexCreationCheck " + regionName + " indexName :" + indexName) {
+      @Override
       public void run2() {
-        // closeCache();
         Cache cache = getCache();
         LogWriter logger = cache.getLogger();
         PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        Map indexMap = region.getIndex();
         PartitionedIndex index = (PartitionedIndex) region.getIndex().get(indexName);
-        if (index == null) {
-          fail("Index " + indexName + " Not Found for region " + regionName);
-        }
-        logger.info("Current number of buckets indexed : " + ""
-            + ((PartitionedIndex) index).getNumberOfIndexedBuckets());
+        assertThat(index).isNotNull();
+
+        logger.info("Current number of buckets indexed: " + index.getNumberOfIndexedBuckets());
         if (bucketCount >= 0) {
-          waitForIndexedBuckets((PartitionedIndex) index, bucketCount);
-        }
-        if (!index.isPopulated()) {
-          fail("Index isPopulatedFlag is not set to true");
+          waitForIndexedBuckets(index, bucketCount);
         }
+        assertThat(index.isPopulated()).isTrue();
       }
     };
-    return sr;
   }
 
-  public CacheSerializableRunnable indexCreationCheck(final String regionName,
+  private CacheSerializableRunnable indexCreationCheck(final String regionName,
       final String indexName) {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable(
-        "IndexCreationCheck region: " + regionName + " indexName :" + indexName) {
+    return new CacheSerializableRunnable(
+        "IndexCreationCheck region: " + regionName + " indexName:" + indexName) {
+      @Override
       public void run2() {
-        // closeCache();
         Cache cache = getCache();
-        LogWriter logger = cache.getLogger();
         LocalRegion region = (LocalRegion) cache.getRegion(regionName);
         Index index = region.getIndexManager().getIndex(indexName);
-        if (index == null) {
-          fail("Index " + indexName + " Not Found for region name:" + regionName);
-        }
+        assertThat(index).isNotNull();
       }
     };
-    return sr;
   }
 
-  public boolean waitForIndexedBuckets(final PartitionedIndex index, final int bucketCount) {
-
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        return (index.getNumberOfIndexedBuckets() >= bucketCount);
-      }
-
-      public String description() {
-        return "Number of Indexed Bucket is less than the expected number. " + bucketCount + ", "
-            + index.getNumberOfIndexedBuckets();
-      }
-    };
-    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
-    return true;
+  private void waitForIndexedBuckets(final PartitionedIndex index, final int bucketCount) {
+    await().atMost(2, MINUTES).until(() -> index.getNumberOfIndexedBuckets() >= bucketCount);
   }
 
-  public CacheSerializableRunnable loadRegion(final String name) {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable("load region on " + name) {
+  private CacheSerializableRunnable loadRegion(final String name) {
+    return new CacheSerializableRunnable("load region on " + name) {
+      @Override
       public void run2() {
         Cache cache = getCache();
-        LogWriter logger = cache.getLogger();
         Region region = cache.getRegion(name);
         for (int i = 0; i < 100; i++) {
-          region.put("" + i, new Portfolio(i));
+          region.put(i, new Portfolio(i));
         }
       }
     };
-    return sr;
   }
 
-  public CacheSerializableRunnable loadRegion(final String name, final int size) {
-    CacheSerializableRunnable sr =
-        new CacheSerializableRunnable("LoadRegion: " + name + " size :" + size) {
-          public void run2() {
-            Cache cache = getCache();
-            LogWriter logger = cache.getLogger();
-            Region region = cache.getRegion(name);
-            for (int i = 0; i < size; i++) {
-              region.put("" + i, new Portfolio(i));
-            }
-          }
-        };
-    return sr;
+  private CacheSerializableRunnable loadRegion(final String name, final int size) {
+    return new CacheSerializableRunnable("LoadRegion: " + name + " size :" + size) {
+      @Override
+      public void run2() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(name);
+        for (int i = 0; i < size; i++) {
+          region.put(i, new Portfolio(i));
+        }
+      }
+    };
   }
 
-  public CacheSerializableRunnable executeQuery(final String rname) {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable("execute query on " + rname) {
+  private CacheSerializableRunnable executeQuery(final String regionName) {
+    return new CacheSerializableRunnable("execute query on " + regionName) {
+      @Override
       public void run2() {
         QueryService qs = getCache().getQueryService();
         QueryObserverImpl observer = new QueryObserverImpl();
         QueryObserverHolder.setInstance(observer);
-        String queryStr = "Select * from /" + rname + " where ID > 10";
-        Query query = qs.newQuery(queryStr);
+        String queryString = "Select * from /" + regionName + " where ID > 10";
+        Query query = qs.newQuery(queryString);
         try {
           query.execute();
         } catch (Exception ex) {
-          fail("Failed to execute the query.");
-        }
-        if (!observer.isIndexesUsed) {
-          fail("Index not used for query. " + queryStr);
+          throw new AssertionError("Failed to execute the query.", ex);
         }
+        assertThat(observer.isIndexesUsed).as("Index not used for query. " + queryString).isTrue();
       }
     };
-    return sr;
   }
 
-  public CacheSerializableRunnable executeQueryAndCompareResult(final String rname,
-      final boolean compareHash) {
-    CacheSerializableRunnable sr =
-        new CacheSerializableRunnable("execute query and compare results.") {
-          public void run2() {
-            QueryService qs = getCache().getQueryService();
-
-            StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
-            SelectResults[][] sr = new SelectResults[1][2];
-            String s[] = new String[2];
-            for (int j = 0; j < queryStr.length; j++) {
-              String[] queryArray = queryStr[j];
-              int numQueriesToCheck = compareHash ? queryArray.length : 3;
-              for (int i = 0; i < numQueriesToCheck; i++) {
-                QueryObserverImpl observer = new QueryObserverImpl();
-                QueryObserverHolder.setInstance(observer);
-                // Query using index.
-                s[0] = queryStr[j][i];
-                // Execute query with index.
-                Query query = qs.newQuery(s[0]);
-
-                try {
-                  sr[0][0] = (SelectResults) query.execute();
-                } catch (Exception ex) {
-                  fail("Failed to execute the query.");
-                }
-                if (!observer.isIndexesUsed) {
-                  fail("Index not used for query. " + s[0]);
-                }
-
-                // Query using no index.
-                s[1] = queryStrNoIndex[j];
-                try {
-                  query = qs.newQuery(s[1]);
-                  sr[0][1] = (SelectResults) query.execute();
-                } catch (Exception ex) {
-                  fail("Failed to execute the query on no index region.");
-                }
-
-                // compare.
-                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
-                    .info("Execute query : \n queryStr with index: " + s[0]
-                        + " \n queryStr without index: " + s[1]);
-                ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, s);
-              }
+  private CacheSerializableRunnable executeQueryAndCompareResult(final boolean compareHash) {
+    return new CacheSerializableRunnable("execute query and compare results.") {
+      @Override
+      public void run2() {
+        QueryService qs = getCache().getQueryService();
+
+        StructSetOrResultsSet resultsSet = new StructSetOrResultsSet();
+        SelectResults[][] selectResults = new SelectResults[1][2];
+        String[] queryStrings = new String[2];
+
+        int numQueries = QUERY_STR.length;
+        for (int j = 0; j < numQueries; j++) {
+          String[] queryArray = QUERY_STR[j];
+          int numQueriesToCheck = compareHash ? queryArray.length : 3;
+          for (int i = 0; i < numQueriesToCheck; i++) {
+            QueryObserverImpl observer = new QueryObserverImpl();
+            QueryObserverHolder.setInstance(observer);
+            // Query using index.
+            queryStrings[0] = QUERY_STR[j][i];
+            // Execute query with index.
+            Query query = qs.newQuery(queryStrings[0]);
+
+            try {
+              selectResults[0][0] = (SelectResults) query.execute();
+            } catch (Exception ex) {
+              throw new AssertionError("Failed to execute the query.", ex);
+            }
+            assertThat(observer.isIndexesUsed).as("Index not used for query. " + queryStrings[0])
+                .isTrue();
+
+            // Query using no index.
+            queryStrings[1] = QUERY_STR_NO_INDEX[j];
+            try {
+              query = qs.newQuery(queryStrings[1]);
+              selectResults[0][1] = (SelectResults) query.execute();
+            } catch (Exception ex) {
+              throw new AssertionError("Failed to execute the query on no index region.", ex);
             }
-          }
-        };
-    return sr;
-  }
 
-  public CacheSerializableRunnable closeWithoutDeletingDiskStore() {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable("close") {
-      public void run2() {
-        IndexManager.testHook = null;
-        // close the cache.
-        closeCache();
-        disconnectFromDS();
+            // compare.
+            getLogWriter().info("Execute query : " + System.getProperty("line.separator")
+                + " QUERY_STR with index: " + queryStrings[0] + " "
+                + System.getProperty("line.separator") + " QUERY_STR without index: "
+                + queryStrings[1]);
+            resultsSet.CompareQueryResultsWithoutAndWithIndexes(selectResults, 1, queryStrings);
+          }
+        }
       }
     };
-    return sr;
   }
 
-  public CacheSerializableRunnable close() {
-    CacheSerializableRunnable sr = new CacheSerializableRunnable("close") {
+  private CacheSerializableRunnable closeWithoutDeletingDiskStore() {
+    return new CacheSerializableRunnable("close") {
+      @Override
       public void run2() {
         IndexManager.testHook = null;
-
-        // Get the disk store name.
-        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-        String diskStoreName = cache.getDefaultDiskStoreName();
-
         // close the cache.
         closeCache();
         disconnectFromDS();
-
-        // remove the disk store.
-        File diskDir = new File(diskStoreName).getAbsoluteFile();
-        try {
-          FileUtils.deleteDirectory(diskDir);
-        } catch (Exception ex) {
-          fail("Failed to delete the disDir");
-        }
       }
     };
-    return sr;
-  }
-
-  protected File findFile(String fileName) {
-    String path = TestUtil.getResourcePath(getClass(), fileName);
-    return new File(path);
   }
 
-  public final InternalDistributedSystem getSystem(String diskStoreId) {
-    new Exception("TEST DEBUG###" + diskStoreId).printStackTrace();
-    if (basicGetSystem() == null || !basicGetSystem().isConnected()) {
-      // Figure out our distributed system properties
-      Properties p =
-          DistributedTestUtils.getAllDistributedSystemProperties(getDistributedSystemProperties());
-      getSystem(p);
-    }
-    return basicGetSystem();
-  }
-
-  private Cache getCache(InternalDistributedSystem system) {
-    Cache cache = basicGetCache();
-    if (cache == null) {
-      try {
-        System.setProperty(
-            DistributionConfig.GEMFIRE_PREFIX + "DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
-        cache = CacheFactory.create(system);
-      } catch (CacheExistsException e) {
-        Assert.fail("the cache already exists", e);
-
-      } catch (RuntimeException ex) {
-        throw ex;
-
-      } catch (Exception ex) {
-        Assert.fail("Checked exception while initializing cache??", ex);
-      } finally {
-        System.clearProperty(
-            DistributionConfig.GEMFIRE_PREFIX + "DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
-      }
-    }
-    return cache;
-  }
+  private static class QueryObserverImpl extends QueryObserverAdapter {
 
-  public static class QueryObserverImpl extends QueryObserverAdapter {
-
-    boolean isIndexesUsed = false;
-    ArrayList indexesUsed = new ArrayList();
+    boolean isIndexesUsed;
+    List<String> indexesUsed = new ArrayList<>();
 
     @Override
     public void beforeIndexLookup(Index index, int oper, Object key) {
-      indexesUsed.add(index.getName());
+      this.indexesUsed.add(index.getName());
     }
 
     @Override
     public void afterIndexLookup(Collection results) {
       if (results != null) {
-        isIndexesUsed = true;
+        this.isIndexesUsed = true;
       }
     }
   }
 }
-
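A minimal sketch of the Awaitility polling pattern that replaces the WaitCriterion/Wait.waitForCriterion pair in waitForIndexedBuckets above. Assumptions not taken from this commit: Awaitility 2.x on the test classpath (org.awaitility package; older releases used com.jayway.awaitility), an AtomicInteger standing in for PartitionedIndex.getNumberOfIndexedBuckets(), and the class name.

import static java.util.concurrent.TimeUnit.MINUTES;
import static org.awaitility.Awaitility.await;

import java.util.concurrent.atomic.AtomicInteger;

public class AwaitilityPollingSketch {

  public static void main(String[] args) {
    AtomicInteger indexedBuckets = new AtomicInteger();

    // Stand-in for the background work that incrementally indexes buckets during GII.
    new Thread(() -> {
      for (int i = 0; i < 50; i++) {
        indexedBuckets.incrementAndGet();
      }
    }).start();

    // Poll until the condition holds; a ConditionTimeoutException is thrown after 2 minutes.
    await().atMost(2, MINUTES).until(() -> indexedBuckets.get() >= 50);
  }
}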

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/CacheXml66DUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
index 01143ff..f9d9fd5 100644
--- a/geode-core/src/test/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
@@ -41,11 +41,8 @@ import java.util.Properties;
 
 import org.junit.Ignore;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 import org.xml.sax.SAXException;
 
-import com.company.app.DBLoader;
-
 import org.apache.geode.DataSerializable;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.AttributesFactory;
@@ -137,7 +134,6 @@ import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.NetworkUtils;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.util.test.TestUtil;
 
 /**
@@ -587,7 +583,8 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
     assertEquals("entry", chaqf.getEvictionPolicy());
     assertEquals(501, chaqf.getCapacity());
     File curDir = new File(".").getAbsoluteFile();
-    File lockFile = new File(curDir, "DRLK_IF" + GemFireCacheImpl.DEFAULT_DS_NAME + ".lk");
+    File lockFile =
+        new File(curDir, "DRLK_IF" + GemFireCacheImpl.getDefaultDiskStoreName() + ".lk");
     assertTrue(lockFile.exists());
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
index f8b1415..4ccf8c2 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
@@ -193,9 +193,10 @@ public abstract class JUnit4CacheTestCase extends JUnit4DistributedTestCase
         CacheXmlGenerator.generate(cache, pw);
         pw.close();
       } catch (IOException ex) {
-        Assert.fail("IOException during cache.xml generation to " + file, ex); // TODO: remove error
-                                                                               // handling
+        // TODO: remove error handling
+        Assert.fail("IOException during cache.xml generation to " + file, ex);
       }
+      // TODO: System.setProperty(GEMFIRE_PREFIX + CACHE_XML_FILE, file.getAbsolutePath());
       cache = null;
       GemFireCacheImpl.testCacheXml = file;
       try {

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
index 537d598..4175e81 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
@@ -14,12 +14,13 @@
  */
 package org.apache.geode.test.dunit.internal;
 
+import java.io.Serializable;
 import java.util.Properties;
 
 /**
  * Defines the {@code DistributedTestCase} methods that can be overridden by its subclasses.
  */
-public interface DistributedTestFixture {
+public interface DistributedTestFixture extends Serializable {
 
   /**
    * {@code preSetUp()} is invoked before {@code DistributedTestCase#setUp()}.

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java b/geode-core/src/test/java/org/apache/geode/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
index 9e00d0a..841036e 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
@@ -62,6 +62,6 @@ public class GetDefaultDiskStoreNameDUnitTest extends JUnit4DistributedTestCase
   }
 
   private String getDefaultDiskStoreName() {
-    return GemFireCacheImpl.DEFAULT_DS_NAME; // TODO: not thread safe
+    return GemFireCacheImpl.getDefaultDiskStoreName(); // TODO: not thread safe
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/java/org/apache/geode/test/dunit/tests/JUnit4GetDefaultDiskStoreNameDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/tests/JUnit4GetDefaultDiskStoreNameDUnitTest.java b/geode-core/src/test/java/org/apache/geode/test/dunit/tests/JUnit4GetDefaultDiskStoreNameDUnitTest.java
index e3e8cbb..1a45991 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/tests/JUnit4GetDefaultDiskStoreNameDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/tests/JUnit4GetDefaultDiskStoreNameDUnitTest.java
@@ -61,6 +61,6 @@ public class JUnit4GetDefaultDiskStoreNameDUnitTest extends JUnit4DistributedTes
   }
 
   private String getDefaultDiskStoreName() {
-    return GemFireCacheImpl.DEFAULT_DS_NAME; // TODO: not thread safe
+    return GemFireCacheImpl.getDefaultDiskStoreName(); // TODO: not thread safe
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/test/resources/org/apache/geode/cache/query/dunit/IndexCreation.xml
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/cache/query/dunit/IndexCreation.xml b/geode-core/src/test/resources/org/apache/geode/cache/query/dunit/IndexCreation.xml
index aa46ce8..953fe27 100644
--- a/geode-core/src/test/resources/org/apache/geode/cache/query/dunit/IndexCreation.xml
+++ b/geode-core/src/test/resources/org/apache/geode/cache/query/dunit/IndexCreation.xml
@@ -19,19 +19,19 @@
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Cache 7.0//EN" "http://www.gemstone.com/dtd/cache7_0.dtd">
   <cache>
-  <region name="PartionedPortfolios">
+  <region name="PartitionedPortfolios">
     <region-attributes>
       <partition-attributes redundant-copies="1" total-num-buckets="50"/>
       <subscription-attributes interest-policy="cache-content"/>
     </region-attributes>
     <index name="statusIndex">
-      <functional from-clause="/PartionedPortfolios p" expression="p.status"/>
+      <functional from-clause="/PartitionedPortfolios p" expression="p.status"/>
     </index>
     <index name="idIndex">
-      <functional from-clause="/PartionedPortfolios p" expression="p.ID"/>
+      <functional from-clause="/PartitionedPortfolios p" expression="p.ID"/>
     </index>
     <index name="secIndex">
-      <functional from-clause="/PartionedPortfolios p, p.positions.values pos" expression="pos.secId"/>
+      <functional from-clause="/PartitionedPortfolios p, p.positions.values pos" expression="pos.secId"/>
     </index>
   </region>
   <region name="Portfolios">
@@ -85,14 +85,14 @@
     <index name="idIndex" type="range" from-clause="/PersistentPrPortfoliosWithRange p" expression="p.ID"/>
     <index name="secIndex" from-clause="/PersistentPrPortfoliosWithRange p, p.positions.values pos" expression="pos.secId"/>
   </region>
-  <region name="PartionedPortfoliosWithHash">
+  <region name="PartitionedPortfoliosWithHash">
     <region-attributes>
       <partition-attributes redundant-copies="1" total-num-buckets="50"/>
       <subscription-attributes interest-policy="cache-content"/>
     </region-attributes>
-    <index name="statusIndex" type="hash" from-clause="/PartionedPortfoliosWithHash p" expression="p.status"/>
-    <index name="idIndex" type="hash" from-clause="/PartionedPortfoliosWithHash p" expression="p.ID"/>
-    <index name="secIndex" from-clause="/PartionedPortfoliosWithHash p, p.positions.values pos" expression="pos.secId"/>
+    <index name="statusIndex" type="hash" from-clause="/PartitionedPortfoliosWithHash p" expression="p.status"/>
+    <index name="idIndex" type="hash" from-clause="/PartitionedPortfoliosWithHash p" expression="p.ID"/>
+    <index name="secIndex" from-clause="/PartitionedPortfoliosWithHash p, p.positions.values pos" expression="pos.secId"/>
   </region>
   <region name="PortfoliosWithHash">
     <region-attributes scope="distributed-ack" data-policy="replicate">
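For comparison with the declarative <index> elements above, the same kinds of indexes can be created through the public QueryService API. A hedged sketch only; the loner cache setup, region shortcuts, index names, and class name are illustrative and not part of this commit.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;

public class ProgrammaticIndexSketch {

  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    cache.createRegionFactory(RegionShortcut.PARTITION).create("PartitionedPortfolios");
    cache.createRegionFactory(RegionShortcut.PARTITION).create("PartitionedPortfoliosWithHash");

    QueryService queryService = cache.getQueryService();

    // Equivalent of the <functional .../> (range) index declarations.
    queryService.createIndex("statusIndex", "p.status", "/PartitionedPortfolios p");
    queryService.createIndex("idIndex", "p.ID", "/PartitionedPortfolios p");

    // Equivalent of an <index type="hash" .../> declaration.
    queryService.createHashIndex("statusHashIndex", "p.status", "/PartitionedPortfoliosWithHash p");

    cache.close();
  }
}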


[6/6] geode git commit: Risky refactorings

Posted by kl...@apache.org.
Risky refactorings


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b605f5d3
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b605f5d3
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b605f5d3

Branch: refs/heads/feature/GEODE-2632-6-1
Commit: b605f5d3de190f5b2a5c87f6dce4a308a1cea705
Parents: c5b8cbe
Author: Kirk Lund <kl...@apache.org>
Authored: Tue Apr 25 10:49:29 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Apr 25 11:03:36 2017 -0700

----------------------------------------------------------------------
 .../org/apache/geode/cache/query/Query.java     |    1 +
 .../geode/internal/cache/DiskStoreImpl.java     |    2 +-
 .../geode/internal/cache/GemFireCacheImpl.java  |  453 ++++----
 .../dunit/QueryIndexUsingXMLDUnitTest.java      | 1001 +++++++-----------
 .../geode/cache30/CacheXml66DUnitTest.java      |    7 +-
 .../cache/internal/JUnit4CacheTestCase.java     |    5 +-
 .../dunit/internal/DistributedTestFixture.java  |    3 +-
 .../tests/GetDefaultDiskStoreNameDUnitTest.java |    2 +-
 .../JUnit4GetDefaultDiskStoreNameDUnitTest.java |    2 +-
 .../geode/cache/query/dunit/IndexCreation.xml   |   16 +-
 10 files changed, 646 insertions(+), 846 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
index ade83a9..8a7b4a5 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
@@ -16,6 +16,7 @@
 package org.apache.geode.cache.query;
 
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.cache.execute.Function;
 import org.apache.geode.cache.execute.FunctionContext;
 import org.apache.geode.cache.execute.FunctionService;

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index d13b4a6..bbff29c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -2731,7 +2731,7 @@ public class DiskStoreImpl implements DiskStore {
     String name = getName();
 
     if (name == null) {
-      name = GemFireCacheImpl.DEFAULT_DS_NAME;
+      name = GemFireCacheImpl.getDefaultDiskStoreName();
     }
 
     return (name + "_" + getDiskStoreID().toString());
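The DEFAULT_DS_NAME to getDefaultDiskStoreName() change here (and in the two GetDefaultDiskStoreName DUnit tests above, both of which carry a "not thread safe" TODO) hides the default disk store name behind an accessor instead of a public field. Purely as an illustration of that pattern, and not the actual GemFireCacheImpl code, a minimal volatile-backed holder could look like this:

// Illustrative only; the class, field, and setter are assumptions, not Geode internals.
public final class DefaultDiskStoreNameHolder {

  private static volatile String defaultDiskStoreName = "DEFAULT";

  private DefaultDiskStoreNameHolder() {}

  // Readers see the most recently written name because the field is volatile.
  public static String getDefaultDiskStoreName() {
    return defaultDiskStoreName;
  }

  public static void setDefaultDiskStoreName(String name) {
    defaultDiskStoreName = name;
  }
}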

http://git-wip-us.apache.org/repos/asf/geode/blob/b605f5d3/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index 29e9f95..74ec96c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -17,6 +17,7 @@ package org.apache.geode.internal.cache;
 import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -66,6 +67,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.regex.Pattern;
 
 import javax.naming.Context;
 import javax.transaction.TransactionManager;
@@ -232,7 +234,7 @@ import org.apache.geode.redis.GeodeRedisServer;
 
 // TODO: somebody Come up with more reasonable values for {@link #DEFAULT_LOCK_TIMEOUT}, etc.
 /**
- * GemFire's implementation of a distributed {@link org.apache.geode.cache.Cache}.
+ * GemFire's implementation of a distributed {@link Cache}.
  */
 @SuppressWarnings("deprecation")
 public class GemFireCacheImpl
@@ -315,6 +317,8 @@ public class GemFireCacheImpl
   /** time in milliseconds */
   private static final int FIVE_HOURS = 5 * 60 * 60 * 1000;
 
+  private static final Pattern DOUBLE_BACKSLASH = Pattern.compile("\\\\");
+
   /** To test MAX_QUERY_EXECUTION_TIME option. */
   public int testMaxQueryExecutionTime = -1;
 
@@ -338,8 +342,9 @@ public class GemFireCacheImpl
 
   private final ConcurrentMap<String, Region<?, ?>> pathToRegion = new ConcurrentHashMap<>();
 
-  private volatile boolean isInitialized = false;
-  volatile boolean isClosing = false;
+  private volatile boolean isInitialized;
+
+  volatile boolean isClosing = false; // used in Stopper inner class
 
   /** Amount of time (in seconds) to wait for a distributed lock */
   private int lockTimeout = DEFAULT_LOCK_TIMEOUT;
@@ -454,7 +459,7 @@ public class GemFireCacheImpl
    * if this cache was forced to close due to a forced-disconnect or system failure, this keeps
    * track of the reason
    */
-  volatile Throwable disconnectCause = null;
+  volatile Throwable disconnectCause; // used in Stopper inner class
 
   /** context where this cache was created -- for debugging, really... */
   private Exception creationStack = null;
@@ -628,8 +633,7 @@ public class GemFireCacheImpl
   }
 
   /**
-   * This is for debugging cache-open issues (esp.
-   * {@link org.apache.geode.cache.CacheExistsException})
+   * This is for debugging cache-open issues (esp. {@link CacheExistsException})
    */
   @Override
   public String toString() {
@@ -961,7 +965,7 @@ public class GemFireCacheImpl
     // if server is not using cluster config
 
     Map<InternalDistributedMember, Collection<String>> scl =
-        this.getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
+        getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
 
     // If there are no locators with Shared configuration, that means the system has been started
     // without shared configuration
@@ -1053,7 +1057,7 @@ public class GemFireCacheImpl
     List<String> locatorConnectionStringList = new ArrayList<>();
 
     Map<InternalDistributedMember, Collection<String>> locatorsWithClusterConfig =
-        this.getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
+        getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
 
     // If there are no locators with Shared configuration, that means the system has been started
     // without shared configuration
@@ -1303,7 +1307,7 @@ public class GemFireCacheImpl
     if (!xmlFile.exists() || !xmlFile.isFile()) {
       // do a resource search
       String resource = xmlFile.getPath();
-      resource = resource.replaceAll("\\\\", "/");
+      resource = DOUBLE_BACKSLASH.matcher(resource).replaceAll("/");
       if (resource.length() > 1 && resource.startsWith("/")) {
         resource = resource.substring(1);
       }
@@ -1344,8 +1348,8 @@ public class GemFireCacheImpl
    *
    * @throws CacheXmlException If something goes wrong while parsing the declarative caching XML
    *         file.
-   * @throws TimeoutException If a {@link org.apache.geode.cache.Region#put(Object, Object)}times
-   *         out while initializing the cache.
+   * @throws TimeoutException If a {@link Region#put(Object, Object)}times out while initializing
+   *         the cache.
    * @throws CacheWriterException If a {@code CacheWriterException} is thrown while initializing the
    *         cache.
    * @throws RegionExistsException If the declarative caching XML file describes a region that
@@ -1402,12 +1406,7 @@ public class GemFireCacheImpl
       throw newEx;
 
     } finally {
-      if (stream != null) {
-        try {
-          stream.close();
-        } catch (IOException ignore) {
-        }
-      }
+      closeQuietly(stream);
     }
   }
 
@@ -1427,12 +1426,7 @@ public class GemFireCacheImpl
         }
       } catch (IOException ignore) {
       } finally {
-        if (br != null) {
-          try {
-            br.close();
-          } catch (IOException ignore) {
-          }
-        }
+        closeQuietly(br);
       }
       logger.info(
           LocalizedMessage.create(LocalizedStrings.GemFireCache_INITIALIZING_CACHE_USING__0__1,
@@ -1535,7 +1529,7 @@ public class GemFireCacheImpl
       try {
         nt.initCause(GemFireCacheImpl.this.disconnectCause);
         return new CacheClosedException(reason, throwable);
-      } catch (IllegalStateException e2) {
+      } catch (IllegalStateException ignore) {
         // Bug 39496 (JRockit related) Give up. The following
         // error is not entirely sane but gives the correct general picture.
         return new CacheClosedException(reason, GemFireCacheImpl.this.disconnectCause);
@@ -1648,12 +1642,11 @@ public class GemFireCacheImpl
     if (DEBUG) {
       System.err.println("DEBUG: Close cache servers");
     }
-    {
-      for (CacheServerImpl cacheServer : cache.allCacheServers) {
-        AcceptorImpl acceptor = cacheServer.getAcceptor();
-        if (acceptor != null) {
-          acceptor.emergencyClose();
-        }
+
+    for (CacheServerImpl cacheServer : cache.allCacheServers) {
+      AcceptorImpl acceptor = cacheServer.getAcceptor();
+      if (acceptor != null) {
+        acceptor.emergencyClose();
       }
     }
 
@@ -1708,7 +1701,7 @@ public class GemFireCacheImpl
       // it's already doing shutdown by another thread
       try {
         this.shutDownAllFinished.await();
-      } catch (InterruptedException e) {
+      } catch (InterruptedException ignore) {
         logger.debug(
             "Shutdown all interrupted while waiting for another thread to do the shutDownAll");
         Thread.currentThread().interrupt();
@@ -1742,7 +1735,7 @@ public class GemFireCacheImpl
           es.shutdown();
           try {
             es.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
-          } catch (InterruptedException e) {
+          } catch (InterruptedException ignore) {
             logger
                 .debug("Shutdown all interrupted while waiting for PRs to be shutdown gracefully.");
           }
@@ -1793,7 +1786,7 @@ public class GemFireCacheImpl
 
           // lock all the primary buckets
           Set<Entry<Integer, BucketRegion>> bucketEntries = dataStore.getAllLocalBuckets();
-          for (Map.Entry e : bucketEntries) {
+          for (Entry e : bucketEntries) {
             BucketRegion bucket = (BucketRegion) e.getValue();
             if (bucket == null || bucket.isDestroyed) {
               // bucket region could be destroyed in race condition
@@ -1846,7 +1839,7 @@ public class GemFireCacheImpl
           // idm is no longer online
           Set<InternalDistributedMember> membersToPersistOfflineEqual =
               partitionedRegion.getRegionAdvisor().adviseDataStore();
-          for (Map.Entry e : bucketEntries) {
+          for (Entry e : bucketEntries) {
             BucketRegion bucket = (BucketRegion) e.getValue();
             if (bucket == null || bucket.isDestroyed) {
               // bucket region could be destroyed in race condition
@@ -1954,7 +1947,7 @@ public class GemFireCacheImpl
   @Override
   public DistributedLockService getPartitionedRegionLockService() {
     synchronized (this.prLockServiceLock) {
-      stopper.checkCancelInProgress(null);
+      this.stopper.checkCancelInProgress(null);
       if (this.prLockService == null) {
         try {
           this.prLockService =
@@ -1981,7 +1974,7 @@ public class GemFireCacheImpl
   public DistributedLockService getGatewaySenderLockService() {
     if (this.gatewayLockService == null) {
       synchronized (this.gatewayLockServiceLock) {
-        stopper.checkCancelInProgress(null);
+        this.stopper.checkCancelInProgress(null);
         if (this.gatewayLockService == null) {
           try {
             this.gatewayLockService = DLockService.create(AbstractGatewaySender.LOCK_SERVICE_NAME,
@@ -2007,7 +2000,7 @@ public class GemFireCacheImpl
   private void destroyPartitionedRegionLockService() {
     try {
       DistributedLockService.destroy(PartitionedRegionHelper.PARTITION_LOCK_SERVICE_NAME);
-    } catch (IllegalArgumentException e) {
+    } catch (IllegalArgumentException ignore) {
       // DistributedSystem.disconnect may have already destroyed the DLS
     }
   }
@@ -2020,7 +2013,7 @@ public class GemFireCacheImpl
     if (DistributedLockService.getServiceNamed(AbstractGatewaySender.LOCK_SERVICE_NAME) != null) {
       try {
         DistributedLockService.destroy(AbstractGatewaySender.LOCK_SERVICE_NAME);
-      } catch (IllegalArgumentException e) {
+      } catch (IllegalArgumentException ignore) {
         // DistributedSystem.disconnect may have already destroyed the DLS
       }
     }
@@ -2028,7 +2021,7 @@ public class GemFireCacheImpl
 
   public HeapEvictor getHeapEvictor() {
     synchronized (this.heapEvictorLock) {
-      stopper.checkCancelInProgress(null);
+      this.stopper.checkCancelInProgress(null);
       if (this.heapEvictor == null) {
         this.heapEvictor = new HeapEvictor(this);
       }
@@ -2087,9 +2080,9 @@ public class GemFireCacheImpl
        * First close the ManagementService as it uses a lot of infra which will be closed by
        * cache.close()
        */
-      system.handleResourceEvent(ResourceEvent.CACHE_REMOVE, this);
+      this.system.handleResourceEvent(ResourceEvent.CACHE_REMOVE, this);
       if (this.resourceEventsListener != null) {
-        this.system.removeResourceListener(resourceEventsListener);
+        this.system.removeResourceListener(this.resourceEventsListener);
         this.resourceEventsListener = null;
       }
 
@@ -2103,7 +2096,7 @@ public class GemFireCacheImpl
       }
 
       this.keepAlive = keepAlive;
-      isClosing = true;
+      this.isClosing = true;
       logger.info(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_NOW_CLOSING, this));
 
       // Before anything else...make sure that this instance is not
@@ -2129,16 +2122,16 @@ public class GemFireCacheImpl
 
         try {
           this.resourceAdvisor.close();
-        } catch (CancelException e) {
+        } catch (CancelException ignore) {
           // ignore
         }
         try {
           this.jmxAdvisor.close();
-        } catch (CancelException e) {
+        } catch (CancelException ignore) {
           // ignore
         }
 
-        for (GatewaySender sender : this.getAllGatewaySenders()) {
+        for (GatewaySender sender : this.allGatewaySenders) {
           try {
             sender.stop();
             GatewaySenderAdvisor advisor = ((AbstractGatewaySender) sender).getSenderAdvisor();
@@ -2148,7 +2141,7 @@ public class GemFireCacheImpl
               }
               advisor.close();
             }
-          } catch (CancelException ce) {
+          } catch (CancelException ignore) {
           }
         }
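Two small patterns recur in the GemFireCacheImpl diff above: IOException-swallowing close blocks are collapsed into the new closeQuietly(Closeable) helper, and the per-call resource.replaceAll("\\\\", "/") is replaced by the precompiled DOUBLE_BACKSLASH pattern. A minimal sketch of the regex change, with illustrative class and method names (only the Pattern constant and the replaceAll call mirror the commit):

import java.util.regex.Pattern;

class PathSeparatorExample {

  // compiled once; the source literal "\\\\" is the regex \\, which matches a single backslash
  private static final Pattern DOUBLE_BACKSLASH = Pattern.compile("\\\\");

  // before: String.replaceAll recompiles the regex on every call
  static String normalizeOld(String resource) {
    return resource.replaceAll("\\\\", "/");
  }

  // after: reuse the shared Pattern; same result, no per-call compilation
  static String normalizeNew(String resource) {
    return DOUBLE_BACKSLASH.matcher(resource).replaceAll("/");
  }
}

For example, normalizeOld("geode\\cache\\cache.xml") and normalizeNew("geode\\cache\\cache.xml") both return "geode/cache/cache.xml".
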
 
@@ -2224,8 +2217,8 @@ public class GemFireCacheImpl
                 }
                 try {
                   lr.handleCacheClose(op);
-                } catch (Exception e) {
-                  if (isDebugEnabled || !forcedDisconnect) {
+                } catch (RuntimeException e) {
+                  if (isDebugEnabled || !this.forcedDisconnect) {
                     logger.warn(LocalizedMessage.create(
                         LocalizedStrings.GemFireCache_0_ERROR_CLOSING_REGION_1,
                         new Object[] {this, lr.getFullPath()}), e);
@@ -2252,15 +2245,15 @@ public class GemFireCacheImpl
           }
 
           closeDiskStores();
-          diskMonitor.close();
+          this.diskMonitor.close();
 
           // Close the CqService Handle.
           try {
             if (isDebugEnabled) {
               logger.debug("{}: closing CQ service...", this);
             }
-            cqService.close();
-          } catch (Exception ex) {
+            this.cqService.close();
+          } catch (RuntimeException ignore) {
             logger.info(LocalizedMessage.create(
                 LocalizedStrings.GemFireCache_FAILED_TO_GET_THE_CQSERVICE_TO_CLOSE_DURING_CACHE_CLOSE_1));
           }
@@ -2272,7 +2265,7 @@ public class GemFireCacheImpl
           }
           try {
             SystemMemberCacheEventProcessor.send(this, Operation.CACHE_CLOSE);
-          } catch (CancelException e) {
+          } catch (CancelException ignore) {
             if (logger.isDebugEnabled()) {
               logger.debug("Ignored cancellation while notifying admins");
             }
@@ -2284,30 +2277,30 @@ public class GemFireCacheImpl
           this.tombstoneService.stop();
 
           // NOTICE: the CloseCache message is the *last* message you can send!
-          DM dm = null;
+          DM distributionManager = null;
           try {
-            dm = system.getDistributionManager();
-            dm.removeMembershipListener(this.transactionManager);
-          } catch (CancelException e) {
-            // dm = null;
+            distributionManager = this.system.getDistributionManager();
+            distributionManager.removeMembershipListener(this.transactionManager);
+          } catch (CancelException ignore) {
+            // distributionManager = null;
           }
 
-          if (dm != null) { // Send CacheClosedMessage (and NOTHING ELSE) here
+          if (distributionManager != null) { // Send CacheClosedMessage (and NOTHING ELSE) here
             if (isDebugEnabled) {
               logger.debug("{}: sending CloseCache to peers...", this);
             }
-            Set otherMembers = dm.getOtherDistributionManagerIds();
-            ReplyProcessor21 processor = new ReplyProcessor21(system, otherMembers);
+            Set otherMembers = distributionManager.getOtherDistributionManagerIds();
+            ReplyProcessor21 processor = new ReplyProcessor21(this.system, otherMembers);
             CloseCacheMessage msg = new CloseCacheMessage();
             msg.setRecipients(otherMembers);
             msg.setProcessorId(processor.getProcessorId());
-            dm.putOutgoing(msg);
+            distributionManager.putOutgoing(msg);
             try {
               processor.waitForReplies();
-            } catch (InterruptedException ex) {
+            } catch (InterruptedException ignore) {
               // Thread.currentThread().interrupt(); // TODO ??? should we reset this bit later?
               // Keep going, make best effort to shut down.
-            } catch (ReplyException ex) {
+            } catch (ReplyException ignore) {
               // keep going
             }
             // set closed state after telling others and getting responses
@@ -2316,17 +2309,15 @@ public class GemFireCacheImpl
           }
           // NO MORE Distributed Messaging AFTER THIS POINT!!!!
 
-          {
-            ClientMetadataService cms = this.clientMetadataService;
-            if (cms != null) {
-              cms.close();
-            }
-            HeapEvictor he = this.heapEvictor;
-            if (he != null) {
-              he.close();
-            }
+          ClientMetadataService cms = this.clientMetadataService;
+          if (cms != null) {
+            cms.close();
+          }
+          HeapEvictor he = this.heapEvictor;
+          if (he != null) {
+            he.close();
           }
-        } catch (CancelException e) {
+        } catch (CancelException ignore) {
           // make sure the disk stores get closed
           closeDiskStores();
           // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
@@ -2334,8 +2325,8 @@ public class GemFireCacheImpl
 
         // Close the CqService Handle.
         try {
-          cqService.close();
-        } catch (Exception ex) {
+          this.cqService.close();
+        } catch (RuntimeException ignore) {
           logger.info(LocalizedMessage.create(
               LocalizedStrings.GemFireCache_FAILED_TO_GET_THE_CQSERVICE_TO_CLOSE_DURING_CACHE_CLOSE_2));
         }
@@ -2345,7 +2336,7 @@ public class GemFireCacheImpl
 
         EventTracker.stopTrackerServices(this);
 
-        synchronized (ccpTimerMutex) {
+        synchronized (this.ccpTimerMutex) {
           if (this.ccpTimer != null) {
             this.ccpTimer.cancel();
           }
@@ -2375,7 +2366,7 @@ public class GemFireCacheImpl
 
       if (!keepDS) {
         // keepDS is used by ShutdownAll. It will override DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE
-        if (!DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE) {
+        if (!this.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE) {
           this.system.disconnect();
         }
       }
@@ -2436,7 +2427,7 @@ public class GemFireCacheImpl
   }
 
   private void stopRedisServer() {
-    if (redisServer != null)
+    if (this.redisServer != null)
       this.redisServer.shutdown();
   }
 
@@ -2482,7 +2473,7 @@ public class GemFireCacheImpl
   void addDiskStore(DiskStoreImpl dsi) {
     this.diskStores.put(dsi.getName(), dsi);
     if (!dsi.isOffline()) {
-      getDiskStoreMonitor().addDiskStore(dsi);
+      this.diskMonitor.addDiskStore(dsi);
     }
   }
 
@@ -2491,13 +2482,13 @@ public class GemFireCacheImpl
     this.regionOwnedDiskStores.remove(dsi.getName());
     // Added for M&M
     if (!dsi.getOwnedByRegion())
-      system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
+      this.system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
   }
 
   void addRegionOwnedDiskStore(DiskStoreImpl dsi) {
     this.regionOwnedDiskStores.put(dsi.getName(), dsi);
     if (!dsi.isOffline()) {
-      getDiskStoreMonitor().addDiskStore(dsi);
+      this.diskMonitor.addDiskStore(dsi);
     }
   }
 
@@ -2511,8 +2502,8 @@ public class GemFireCacheImpl
         }
         dsi.close();
         // Added for M&M
-        system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
-      } catch (Exception e) {
+        this.system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
+      } catch (RuntimeException e) {
         logger.fatal(
             LocalizedMessage.create(LocalizedStrings.Disk_Store_Exception_During_Cache_Close), e);
       }
@@ -2524,14 +2515,14 @@ public class GemFireCacheImpl
    * Used by unit tests to allow them to change the default disk store name.
    */
   public static void setDefaultDiskStoreName(String dsName) {
-    DEFAULT_DS_NAME = dsName;
+    defaultDiskStoreName = dsName;
   }
 
   public static String getDefaultDiskStoreName() {
-    return DEFAULT_DS_NAME;
+    return defaultDiskStoreName;
   }
 
-  public static String DEFAULT_DS_NAME = DiskStoreFactory.DEFAULT_DISK_STORE_NAME;
+  private static String defaultDiskStoreName = DiskStoreFactory.DEFAULT_DISK_STORE_NAME;
 
   @Override
   public DiskStoreImpl getOrCreateDefaultDiskStore() {
@@ -2540,7 +2531,7 @@ public class GemFireCacheImpl
       synchronized (this) {
         result = (DiskStoreImpl) findDiskStore(null);
         if (result == null) {
-          result = (DiskStoreImpl) createDiskStoreFactory().create(DEFAULT_DS_NAME);
+          result = (DiskStoreImpl) createDiskStoreFactory().create(defaultDiskStoreName);
         }
       }
     }
@@ -2555,7 +2546,7 @@ public class GemFireCacheImpl
   @Override
   public DiskStore findDiskStore(String name) {
     if (name == null) {
-      name = DEFAULT_DS_NAME;
+      name = defaultDiskStoreName;
     }
     return this.diskStores.get(name);
   }
@@ -2572,7 +2563,7 @@ public class GemFireCacheImpl
 
   @Override
   public Collection<DiskStoreImpl> listDiskStoresIncludingRegionOwned() {
-    HashSet<DiskStoreImpl> allDiskStores = new HashSet<>();
+    Collection<DiskStoreImpl> allDiskStores = new HashSet<>();
     allDiskStores.addAll(this.diskStores.values());
     allDiskStores.addAll(this.regionOwnedDiskStores.values());
     return allDiskStores;
@@ -2614,7 +2605,7 @@ public class GemFireCacheImpl
           logger.debug("Ignored cache closure while closing bridge {}", cacheServer, e);
         }
       }
-      allCacheServers.remove(cacheServer);
+      this.allCacheServers.remove(cacheServer);
       stoppedCacheServer = true;
     }
     if (stoppedCacheServer) {
@@ -2686,11 +2677,11 @@ public class GemFireCacheImpl
   @Override
   public Set<DistributedMember> getMembers(Region region) {
     if (region instanceof DistributedRegion) {
-      DistributedRegion d = (DistributedRegion) region;
-      return (Set<DistributedMember>) d.getDistributionAdvisor().adviseCacheOp();
+      DistributedRegion distributedRegion = (DistributedRegion) region;
+      return (Set<DistributedMember>) distributedRegion.getDistributionAdvisor().adviseCacheOp();
     } else if (region instanceof PartitionedRegion) {
-      PartitionedRegion p = (PartitionedRegion) region;
-      return (Set<DistributedMember>) p.getRegionAdvisor().adviseAllPRNodes();
+      PartitionedRegion partitionedRegion = (PartitionedRegion) region;
+      return (Set<DistributedMember>) partitionedRegion.getRegionAdvisor().adviseAllPRNodes();
     } else {
       return Collections.emptySet();
     }
@@ -2772,7 +2763,7 @@ public class GemFireCacheImpl
   public List<Properties> getDeclarableProperties(final String className) {
     List<Properties> propertiesList = new ArrayList<>();
     synchronized (this.declarablePropertiesMap) {
-      for (Map.Entry<Declarable, Properties> entry : this.declarablePropertiesMap.entrySet()) {
+      for (Entry<Declarable, Properties> entry : this.declarablePropertiesMap.entrySet()) {
         if (entry.getKey().getClass().getName().equals(className)) {
           propertiesList.add(entry.getValue());
         }
@@ -2817,9 +2808,9 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> attrs)
+  public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> aRegionAttributes)
       throws RegionExistsException, TimeoutException {
-    return createRegion(name, attrs);
+    return createRegion(name, aRegionAttributes);
   }
 
   private PoolFactory createDefaultPF() {
@@ -2828,7 +2819,7 @@ public class GemFireCacheImpl
       String localHostName = SocketCreator.getHostName(SocketCreator.getLocalHost());
       defaultPoolFactory.addServer(localHostName, CacheServer.DEFAULT_PORT);
     } catch (UnknownHostException ex) {
-      throw new IllegalStateException("Could not determine local host name");
+      throw new IllegalStateException("Could not determine local host name", ex);
     }
     return defaultPoolFactory;
   }
@@ -2861,7 +2852,7 @@ public class GemFireCacheImpl
         }
         if (pool == null) {
           // if pool is still null then we will not have a default pool for this ClientCache
-          setDefaultPool(null);
+          this.defaultPool = null;
           return;
         }
       }
@@ -2872,7 +2863,7 @@ public class GemFireCacheImpl
           String localHostName = SocketCreator.getHostName(SocketCreator.getLocalHost());
           pfi.addServer(localHostName, CacheServer.DEFAULT_PORT);
         } catch (UnknownHostException ex) {
-          throw new IllegalStateException("Could not determine local host name");
+          throw new IllegalStateException("Could not determine local host name", ex);
         }
       }
       // look for a pool that already exists that is compatible with
@@ -2897,7 +2888,7 @@ public class GemFireCacheImpl
       }
       pool = this.poolFactory.create(poolName);
     }
-    setDefaultPool(pool);
+    this.defaultPool = pool;
   }
 
   /**
@@ -2905,10 +2896,10 @@ public class GemFireCacheImpl
    *
    * @return the default pool that is right for us
    */
-  public Pool determineDefaultPool(PoolFactory pf) {
+  public Pool determineDefaultPool(PoolFactory poolFactory) {
     Pool pool;
     // create the pool if it does not already exist
-    if (pf == null) {
+    if (poolFactory == null) {
       Map<String, Pool> pools = PoolManager.getAll();
       if (pools.isEmpty()) {
         throw new IllegalStateException("Since a cache already existed a pool should also exist.");
@@ -2939,18 +2930,19 @@ public class GemFireCacheImpl
         }
       }
     } else {
-      PoolFactoryImpl pfi = (PoolFactoryImpl) pf;
-      if (pfi.getPoolAttributes().locators.isEmpty() && pfi.getPoolAttributes().servers.isEmpty()) {
+      PoolFactoryImpl poolFactoryImpl = (PoolFactoryImpl) poolFactory;
+      if (poolFactoryImpl.getPoolAttributes().locators.isEmpty()
+          && poolFactoryImpl.getPoolAttributes().servers.isEmpty()) {
         try {
           String localHostName = SocketCreator.getHostName(SocketCreator.getLocalHost());
-          pfi.addServer(localHostName, CacheServer.DEFAULT_PORT);
+          poolFactoryImpl.addServer(localHostName, CacheServer.DEFAULT_PORT);
         } catch (UnknownHostException ex) {
-          throw new IllegalStateException("Could not determine local host name");
+          throw new IllegalStateException("Could not determine local host name", ex);
         }
       }
-      PoolImpl defPool = (PoolImpl) getDefaultPool();
-      if (defPool != null && defPool.isCompatible(pfi.getPoolAttributes())) {
-        pool = defPool;
+      PoolImpl defaultPool = (PoolImpl) getDefaultPool();
+      if (defaultPool != null && defaultPool.isCompatible(poolFactoryImpl.getPoolAttributes())) {
+        pool = defaultPool;
       } else {
         throw new IllegalStateException("Existing cache's default pool was not compatible");
       }
@@ -2959,12 +2951,12 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public <K, V> Region<K, V> createRegion(String name, RegionAttributes<K, V> attrs)
+  public <K, V> Region<K, V> createRegion(String name, RegionAttributes<K, V> aRegionAttributes)
       throws RegionExistsException, TimeoutException {
     if (isClient()) {
       throw new UnsupportedOperationException("operation is not supported on a client cache");
     }
-    return basicCreateRegion(name, attrs);
+    return basicCreateRegion(name, aRegionAttributes);
   }
 
   public <K, V> Region<K, V> basicCreateRegion(String name, RegionAttributes<K, V> attrs)
@@ -2979,26 +2971,25 @@ public class GemFireCacheImpl
       return createVMRegion(name, attrs, ira);
     } catch (IOException | ClassNotFoundException e) {
       // only if loading snapshot, not here
-      InternalGemFireError assErr = new InternalGemFireError(
-          LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
-      assErr.initCause(e);
-      throw assErr;
+      throw new InternalGemFireError(
+          LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString(), e);
     }
   }
 
   @Override
-  public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> attributesArg,
+  public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> p_attrs,
       InternalRegionArguments internalRegionArgs)
       throws RegionExistsException, TimeoutException, IOException, ClassNotFoundException {
+    // TODO: refactor overly complex method
     if (getMyId().getVmKind() == DistributionManager.LOCATOR_DM_TYPE) {
       if (!internalRegionArgs.isUsedForMetaRegion()
           && internalRegionArgs.getInternalMetaRegion() == null) {
         throw new IllegalStateException("Regions can not be created in a locator.");
       }
     }
-    stopper.checkCancelInProgress(null);
+    this.stopper.checkCancelInProgress(null);
     LocalRegion.validateRegionName(name, internalRegionArgs);
-    RegionAttributes<K, V> attrs = attributesArg;
+    RegionAttributes<K, V> attrs = p_attrs;
     attrs = invokeRegionBefore(null, name, attrs, internalRegionArgs);
     if (attrs == null) {
       throw new IllegalArgumentException(
@@ -3054,14 +3045,14 @@ public class GemFireCacheImpl
 
         boolean interrupted = Thread.interrupted();
         try { // future != null
-          LocalRegion r = (LocalRegion) future.get(); // wait on Future
-          throw new RegionExistsException(r);
-        } catch (InterruptedException e) {
+          LocalRegion localRegion = (LocalRegion) future.get(); // wait on Future
+          throw new RegionExistsException(localRegion);
+        } catch (InterruptedException ignore) {
           interrupted = true;
         } catch (ExecutionException e) {
           throw new Error(LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString(),
               e);
-        } catch (CancellationException e) {
+        } catch (CancellationException ignore) {
           // future was cancelled
         } finally {
           if (interrupted) {
@@ -3078,7 +3069,7 @@ public class GemFireCacheImpl
       } catch (CancelException | RedundancyAlreadyMetException e) {
         // don't print a call stack
         throw e;
-      } catch (final RuntimeException validationException) {
+      } catch (RuntimeException validationException) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.GemFireCache_INITIALIZATION_FAILED_FOR_REGION_0, region.getFullPath()),
             validationException);
@@ -3094,7 +3085,7 @@ public class GemFireCacheImpl
             throw e;
           } catch (Throwable t) {
             SystemFailure.checkFailure();
-            stopper.checkCancelInProgress(t);
+            this.stopper.checkCancelInProgress(t);
 
             // bug #44672 - log the failure but don't override the original exception
             logger.warn(LocalizedMessage.create(
@@ -3117,15 +3108,16 @@ public class GemFireCacheImpl
       region.postCreateRegion();
     } catch (RegionExistsException ex) {
       // outside of sync make sure region is initialized to fix bug 37563
-      LocalRegion r = (LocalRegion) ex.getRegion();
-      r.waitOnInitialization(); // don't give out ref until initialized
+      LocalRegion localRegion = (LocalRegion) ex.getRegion();
+      localRegion.waitOnInitialization(); // don't give out ref until initialized
       throw ex;
     }
 
     invokeRegionAfter(region);
+
     // Added for M&M . Putting the callback here to avoid creating RegionMBean in case of Exception
     if (!region.isInternalRegion()) {
-      system.handleResourceEvent(ResourceEvent.REGION_CREATE, region);
+      this.system.handleResourceEvent(ResourceEvent.REGION_CREATE, region);
     }
 
     return region;
@@ -3133,16 +3125,17 @@ public class GemFireCacheImpl
 
   @Override
   public <K, V> RegionAttributes<K, V> invokeRegionBefore(LocalRegion parent, String name,
-      RegionAttributes<K, V> attributes, InternalRegionArguments internalRegionArgs) {
-    for (RegionListener listener : regionListeners) {
-      attributes = listener.beforeCreate(parent, name, attributes, internalRegionArgs);
+      RegionAttributes<K, V> attrs, InternalRegionArguments internalRegionArgs) {
+    for (RegionListener listener : this.regionListeners) {
+      attrs =
+          (RegionAttributes<K, V>) listener.beforeCreate(parent, name, attrs, internalRegionArgs);
     }
-    return attributes;
+    return attrs;
   }
 
   @Override
   public void invokeRegionAfter(LocalRegion region) {
-    for (RegionListener listener : regionListeners) {
+    for (RegionListener listener : this.regionListeners) {
       listener.afterCreate(region);
     }
   }
@@ -3170,7 +3163,7 @@ public class GemFireCacheImpl
           if (dataStore != null) {
             Set<Entry<Integer, BucketRegion>> bucketEntries =
                 partitionedRegion.getDataStore().getAllLocalBuckets();
-            for (Map.Entry entry : bucketEntries) {
+            for (Entry entry : bucketEntries) {
               result.add((LocalRegion) entry.getValue());
             }
           }
@@ -3202,11 +3195,11 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public void setRegionByPath(String path, LocalRegion localRegion) {
-    if (localRegion == null) {
+  public void setRegionByPath(String path, LocalRegion r) {
+    if (r == null) {
       this.pathToRegion.remove(path);
     } else {
-      this.pathToRegion.put(path, localRegion);
+      this.pathToRegion.put(path, r);
     }
   }
 
@@ -3218,7 +3211,7 @@ public class GemFireCacheImpl
       throw new IllegalArgumentException(
           LocalizedStrings.GemFireCache_PATH_CANNOT_BE_NULL.toLocalizedString());
     }
-    if (path.length() == 0) {
+    if (path.isEmpty()) {
       throw new IllegalArgumentException(
           LocalizedStrings.GemFireCache_PATH_CANNOT_BE_EMPTY.toLocalizedString());
     }
@@ -3232,11 +3225,10 @@ public class GemFireCacheImpl
   public LocalRegion getRegionByPath(String path) {
     validatePath(path); // fix for bug 34892
 
-    { // do this before checking the pathToRegion map
-      LocalRegion result = getReinitializingRegion(path);
-      if (result != null) {
-        return result;
-      }
+    // do this before checking the pathToRegion map
+    LocalRegion result = getReinitializingRegion(path);
+    if (result != null) {
+      return result;
     }
     return (LocalRegion) this.pathToRegion.get(path);
   }
@@ -3244,7 +3236,7 @@ public class GemFireCacheImpl
   public LocalRegion getRegionByPathForProcessing(String path) {
     LocalRegion result = getRegionByPath(path);
     if (result == null) {
-      stopper.checkCancelInProgress(null);
+      this.stopper.checkCancelInProgress(null);
       int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT); // go through
       // initialization latches
       try {
@@ -3273,17 +3265,16 @@ public class GemFireCacheImpl
   @Override
   public Region getRegion(String path, boolean returnDestroyedRegion) {
     this.stopper.checkCancelInProgress(null);
-    {
-      LocalRegion result = getRegionByPath(path);
-      // Do not waitOnInitialization() for PR
-      if (result != null) {
-        result.waitOnInitialization();
-        if (!returnDestroyedRegion && result.isDestroyed()) {
-          this.stopper.checkCancelInProgress(null);
-          return null;
-        } else {
-          return result;
-        }
+
+    LocalRegion result = getRegionByPath(path);
+    // Do not waitOnInitialization() for PR
+    if (result != null) {
+      result.waitOnInitialization();
+      if (!returnDestroyedRegion && result.isDestroyed()) {
+        this.stopper.checkCancelInProgress(null);
+        return null;
+      } else {
+        return result;
       }
     }
 
@@ -3312,7 +3303,7 @@ public class GemFireCacheImpl
 
   /** Return true if this region is initializing */
   boolean isGlobalRegionInitializing(String fullPath) {
-    stopper.checkCancelInProgress(null);
+    this.stopper.checkCancelInProgress(null);
     int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT); // go through
     // initialization latches
     try {
@@ -3357,7 +3348,7 @@ public class GemFireCacheImpl
       }
     }
     if (waitForInit) {
-      for (Iterator iterator = regions.iterator(); iterator.hasNext();) {
+      for (Iterator<Region<?, ?>> iterator = regions.iterator(); iterator.hasNext();) {
         LocalRegion region = (LocalRegion) iterator.next();
         if (!region.checkForInitialization()) {
           iterator.remove();
@@ -3373,14 +3364,14 @@ public class GemFireCacheImpl
    * @since GemFire 5.7
    */
   @Override
-  public void cleanupForClient(CacheClientNotifier notifier, ClientProxyMembershipID client) {
+  public void cleanupForClient(CacheClientNotifier ccn, ClientProxyMembershipID client) {
     try {
       if (isClosed()) {
         return;
       }
       for (Object region : rootRegions(false, false)) {
         LocalRegion localRegion = (LocalRegion) region;
-        localRegion.cleanupForClient(notifier, client);
+        localRegion.cleanupForClient(ccn, client);
       }
     } catch (DistributedSystemDisconnectedException ignore) {
     }
@@ -3474,12 +3465,12 @@ public class GemFireCacheImpl
         logger.debug("Returning manifested future for: {}", fullPath);
       }
       return region;
-    } catch (InterruptedException e) {
+    } catch (InterruptedException ignore) {
       Thread.currentThread().interrupt();
       return null;
     } catch (ExecutionException e) {
       throw new Error(LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString(), e);
-    } catch (CancellationException e) {
+    } catch (CancellationException ignore) {
       // future was cancelled
       logger.debug("future cancelled, returning null");
       return null;
@@ -3541,7 +3532,7 @@ public class GemFireCacheImpl
   }
 
   /**
-   * Implementation of {@link org.apache.geode.cache.Cache#setCopyOnRead}
+   * Implementation of {@link Cache#setCopyOnRead}
    *
    * @since GemFire 4.0
    */
@@ -3551,7 +3542,7 @@ public class GemFireCacheImpl
   }
 
   /**
-   * Implementation of {@link org.apache.geode.cache.Cache#getCopyOnRead}
+   * Implementation of {@link Cache#getCopyOnRead}
    *
    * @since GemFire 4.0
    */
@@ -3563,17 +3554,17 @@ public class GemFireCacheImpl
   /**
    * Remove the specified root region
    *
-   * @param rootRegion the region to be removed
+   * @param rootRgn the region to be removed
    * @return true if root region was removed, false if not found
    */
   @Override
-  public boolean removeRoot(LocalRegion rootRegion) {
+  public boolean removeRoot(LocalRegion rootRgn) {
     synchronized (this.rootRegions) {
-      String regionName = rootRegion.getName();
+      String regionName = rootRgn.getName();
       LocalRegion found = this.rootRegions.get(regionName);
-      if (found == rootRegion) {
+      if (found == rootRgn) {
         LocalRegion previous = this.rootRegions.remove(regionName);
-        Assert.assertTrue(previous == rootRegion);
+        Assert.assertTrue(previous == rootRgn);
         return true;
       } else
         return false;
@@ -3584,8 +3575,7 @@ public class GemFireCacheImpl
    * @return array of two Strings, the root name and the relative path from root. If there is no
    *         relative path from root, then String[1] will be an empty string
    */
-  static String[] parsePath(String p_path) {
-    String path = p_path;
+  static String[] parsePath(String path) {
     validatePath(path);
     String[] result = new String[2];
     result[1] = "";
@@ -3624,13 +3614,13 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public void addRegionListener(RegionListener listener) {
-    this.regionListeners.add(listener);
+  public void addRegionListener(RegionListener l) {
+    this.regionListeners.add(l);
   }
 
   @Override
-  public void removeRegionListener(RegionListener listener) {
-    this.regionListeners.remove(listener);
+  public void removeRegionListener(RegionListener l) {
+    this.regionListeners.remove(l);
   }
 
   @Override
@@ -3773,7 +3763,7 @@ public class GemFireCacheImpl
 
     synchronized (this.allGatewaySendersLock) {
       if (!this.allGatewaySenders.contains(sender)) {
-        new UpdateAttributesProcessor((AbstractGatewaySender) sender).distribute(true);
+        new UpdateAttributesProcessor((DistributionAdvisee) sender).distribute(true);
         Set<GatewaySender> newSenders = new HashSet<>(this.allGatewaySenders.size() + 1);
         if (!this.allGatewaySenders.isEmpty()) {
           newSenders.addAll(this.allGatewaySenders);
@@ -3821,7 +3811,7 @@ public class GemFireCacheImpl
 
     synchronized (this.allGatewaySendersLock) {
       if (this.allGatewaySenders.contains(sender)) {
-        new UpdateAttributesProcessor((AbstractGatewaySender) sender, true).distribute(true);
+        new UpdateAttributesProcessor((DistributionAdvisee) sender, true).distribute(true);
         Set<GatewaySender> newSenders = new HashSet<>(this.allGatewaySenders.size() - 1);
         if (!this.allGatewaySenders.isEmpty()) {
           newSenders.addAll(this.allGatewaySenders);
@@ -3882,9 +3872,9 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public GatewaySender getGatewaySender(String Id) {
+  public GatewaySender getGatewaySender(String id) {
     for (GatewaySender sender : this.allGatewaySenders) {
-      if (sender.getId().equals(Id)) {
+      if (sender.getId().equals(id)) {
         return sender;
       }
     }
@@ -3995,27 +3985,26 @@ public class GemFireCacheImpl
     }
   }
 
-  private TreeMap<String, Map<String, PartitionedRegion>> getPRTrees() {
+  private SortedMap<String, Map<String, PartitionedRegion>> getPRTrees() {
     // prTree will save a sublist of PRs who are under the same root
-    TreeMap<String, Map<String, PartitionedRegion>> prTrees = new TreeMap<>();
-    TreeMap<String, PartitionedRegion> prMap = getPartitionedRegionMap();
+    SortedMap<String, PartitionedRegion> prMap = getPartitionedRegionMap();
     boolean hasColocatedRegion = false;
     for (PartitionedRegion pr : prMap.values()) {
       List<PartitionedRegion> childList = ColocationHelper.getColocatedChildRegions(pr);
-      if (childList != null && childList.size() > 0) {
+      if (childList != null && !childList.isEmpty()) {
         hasColocatedRegion = true;
         break;
       }
     }
 
+    TreeMap<String, Map<String, PartitionedRegion>> prTrees = new TreeMap<>();
     if (hasColocatedRegion) {
-      LinkedHashMap<String, PartitionedRegion> orderedPrMap = orderByColocation(prMap);
+      Map<String, PartitionedRegion> orderedPrMap = orderByColocation(prMap);
       prTrees.put("ROOT", orderedPrMap);
     } else {
       for (PartitionedRegion pr : prMap.values()) {
         String rootName = pr.getRoot().getName();
-        TreeMap<String, PartitionedRegion> prSubMap =
-            (TreeMap<String, PartitionedRegion>) prTrees.get(rootName);
+        Map<String, PartitionedRegion> prSubMap = prTrees.get(rootName);
         if (prSubMap == null) {
           prSubMap = new TreeMap<>();
           prTrees.put(rootName, prSubMap);
@@ -4027,11 +4016,11 @@ public class GemFireCacheImpl
     return prTrees;
   }
 
-  private TreeMap<String, PartitionedRegion> getPartitionedRegionMap() {
-    TreeMap<String, PartitionedRegion> prMap = new TreeMap<>();
-    for (Map.Entry<String, Region<?, ?>> entry : this.pathToRegion.entrySet()) {
+  private SortedMap<String, PartitionedRegion> getPartitionedRegionMap() {
+    SortedMap<String, PartitionedRegion> prMap = new TreeMap<>();
+    for (Entry<String, Region<?, ?>> entry : this.pathToRegion.entrySet()) {
       String regionName = entry.getKey();
-      Region region = entry.getValue();
+      Region<?, ?> region = entry.getValue();
 
       // Don't wait for non partitioned regions
       if (!(region instanceof PartitionedRegion)) {
@@ -4044,7 +4033,7 @@ public class GemFireCacheImpl
         if (pr instanceof PartitionedRegion) {
           prMap.put(regionName, (PartitionedRegion) pr);
         }
-      } catch (CancelException ce) {
+      } catch (CancelException ignore) {
         // if some region throws cancel exception during initialization,
         // then no need to shutDownAll them gracefully
       }
@@ -4053,8 +4042,7 @@ public class GemFireCacheImpl
     return prMap;
   }
 
-  private LinkedHashMap<String, PartitionedRegion> orderByColocation(
-      TreeMap<String, PartitionedRegion> prMap) {
+  private Map<String, PartitionedRegion> orderByColocation(Map<String, PartitionedRegion> prMap) {
     LinkedHashMap<String, PartitionedRegion> orderedPrMap = new LinkedHashMap<>();
     for (PartitionedRegion pr : prMap.values()) {
       addColocatedChildRecursively(orderedPrMap, pr);
@@ -4075,12 +4063,12 @@ public class GemFireCacheImpl
    * Notification adds to the messaging a PR must do on each put/destroy/invalidate operation and
    * should be kept to a minimum
    *
-   * @param region the partitioned region
+   * @param r the partitioned region
    * @return true if the region should deliver all of its events to this cache
    */
   @Override
-  public boolean requiresNotificationFromPR(PartitionedRegion region) {
-    boolean hasSerialSenders = hasSerialSenders(region);
+  public boolean requiresNotificationFromPR(PartitionedRegion r) {
+    boolean hasSerialSenders = hasSerialSenders(r);
     if (!hasSerialSenders) {
       for (CacheServerImpl server : this.allCacheServers) {
         if (!server.getNotifyBySubscription()) {
@@ -4134,20 +4122,20 @@ public class GemFireCacheImpl
     if (isClient()) {
       return false;
     }
-    stopper.checkCancelInProgress(null);
+    this.stopper.checkCancelInProgress(null);
 
-    return this.isServer || this.allCacheServers.size() > 0;
+    return this.isServer || !this.allCacheServers.isEmpty();
   }
 
   @Override
   public QueryService getQueryService() {
     if (isClient()) {
-      Pool defaultPool = getDefaultPool();
-      if (defaultPool == null) {
+      Pool pool = getDefaultPool();
+      if (pool == null) {
         throw new IllegalStateException(
             "Client cache does not have a default pool. Use getQueryService(String poolName) instead.");
       } else {
-        return defaultPool.getQueryService();
+        return pool.getQueryService();
       }
     } else {
       return new DefaultQueryService(this);
@@ -4195,11 +4183,11 @@ public class GemFireCacheImpl
   }
 
   @Override
-  public <K, V> void setRegionAttributes(String id, RegionAttributes<K, V> regionAttributes) {
-    if (regionAttributes == null) {
+  public <K, V> void setRegionAttributes(String id, RegionAttributes<K, V> attrs) {
+    if (attrs == null) {
       this.namedRegionAttributes.remove(id);
     } else {
-      this.namedRegionAttributes.put(id, regionAttributes);
+      this.namedRegionAttributes.put(id, attrs);
     }
   }
 
@@ -4211,18 +4199,23 @@ public class GemFireCacheImpl
   private static final ThreadLocal<GemFireCacheImpl> xmlCache = new ThreadLocal<>();
 
   @Override
-  public void loadCacheXml(InputStream stream)
+  public void loadCacheXml(InputStream is)
       throws TimeoutException, CacheWriterException, GatewayException, RegionExistsException {
     // make this cache available to callbacks being initialized during xml create
     final GemFireCacheImpl oldValue = xmlCache.get();
     xmlCache.set(this);
+
+    Reader reader = null;
+    Writer stringWriter = null;
+    OutputStreamWriter writer = null;
+
     try {
       CacheXmlParser xml;
 
       if (XML_PARAMETERIZATION_ENABLED) {
         char[] buffer = new char[1024];
-        Reader reader = new BufferedReader(new InputStreamReader(stream, "ISO-8859-1"));
-        Writer stringWriter = new StringWriter();
+        reader = new BufferedReader(new InputStreamReader(is, "ISO-8859-1"));
+        stringWriter = new StringWriter();
 
         int numChars;
         while ((numChars = reader.read(buffer)) != -1) {
@@ -4232,27 +4225,39 @@ public class GemFireCacheImpl
         /*
          * Now replace all replaceable system properties here using {@code PropertyResolver}
          */
-        String replacedXmlString = resolver.processUnresolvableString(stringWriter.toString());
+        String replacedXmlString = this.resolver.processUnresolvableString(stringWriter.toString());
 
         /*
          * Turn the string back into the default encoding so that the XML parser can work correctly
          * in the presence of an "encoding" attribute in the XML prolog.
          */
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        OutputStreamWriter writer = new OutputStreamWriter(baos, "ISO-8859-1");
+        writer = new OutputStreamWriter(baos, "ISO-8859-1");
         writer.write(replacedXmlString);
         writer.flush();
 
         xml = CacheXmlParser.parse(new ByteArrayInputStream(baos.toByteArray()));
       } else {
-        xml = CacheXmlParser.parse(stream);
+        xml = CacheXmlParser.parse(is);
       }
       xml.create(this);
     } catch (IOException e) {
       throw new CacheXmlException(
-          "Input Stream could not be read for system property substitutions.");
+          "Input Stream could not be read for system property substitutions.", e);
     } finally {
       xmlCache.set(oldValue);
+      closeQuietly(reader);
+      closeQuietly(stringWriter);
+      closeQuietly(writer);
+    }
+  }
+
+  private static void closeQuietly(Closeable closeable) { // KIRK
+    try {
+      if (closeable != null) {
+        closeable.close();
+      }
+    } catch (IOException ignore) {
     }
   }
 
@@ -4276,7 +4281,7 @@ public class GemFireCacheImpl
   @Override
   public InternalResourceManager getInternalResourceManager(boolean checkCancellationInProgress) {
     if (checkCancellationInProgress) {
-      stopper.checkCancelInProgress(null);
+      this.stopper.checkCancelInProgress(null);
     }
     return this.resourceManager;
   }
@@ -4311,7 +4316,7 @@ public class GemFireCacheImpl
   // TODO make this a simple int guarded by riWaiters and get rid of the double-check
   private final AtomicInteger registerInterestsInProgress = new AtomicInteger();
 
-  private final ArrayList<SimpleWaiter> riWaiters = new ArrayList<>();
+  private final List<SimpleWaiter> riWaiters = new ArrayList<>();
 
   // never changes but is currently only initialized in constructor by unit tests
   private TypeRegistry pdxRegistry;
@@ -4331,7 +4336,7 @@ public class GemFireCacheImpl
     }
     if (numInProgress == 0) {
       synchronized (this.riWaiters) {
-        // TODO double-check
+        // TODO: get rid of double-check
         numInProgress = this.registerInterestsInProgress.get();
         if (numInProgress == 0) { // all clear
           if (logger.isDebugEnabled()) {
@@ -4365,8 +4370,8 @@ public class GemFireCacheImpl
     getCancelCriterion().checkCancelInProgress(null);
 
     int count = this.registerInterestsInProgress.get();
-    SimpleWaiter simpleWaiter = null;
     if (count > 0) {
+      SimpleWaiter simpleWaiter = null;
       synchronized (this.riWaiters) {
         // TODO double-check
         count = this.registerInterestsInProgress.get();
@@ -4459,7 +4464,7 @@ public class GemFireCacheImpl
           getCancelCriterion().checkCancelInProgress(null);
           boolean interrupted = Thread.interrupted();
           try {
-            this.wait(1000);
+            wait(1000);
           } catch (InterruptedException ignore) {
             interrupted = true;
           } finally {
@@ -4495,7 +4500,7 @@ public class GemFireCacheImpl
       // Wait for replies.
       try {
         replyProcessor.waitForReplies();
-      } catch (InterruptedException ie) {
+      } catch (InterruptedException ignore) {
         Thread.currentThread().interrupt();
       }
     }
@@ -5059,16 +5064,18 @@ public class GemFireCacheImpl
    */
   public void addDeclarableProperties(final Map<Declarable, Properties> mapOfNewDeclarableProps) {
     synchronized (this.declarablePropertiesMap) {
-      for (Map.Entry<Declarable, Properties> newEntry : mapOfNewDeclarableProps.entrySet()) {
+      for (Entry<Declarable, Properties> newEntry : mapOfNewDeclarableProps.entrySet()) {
         // Find and remove a Declarable from the map if an "equal" version is already stored
         Class<? extends Declarable> clazz = newEntry.getKey().getClass();
 
         Declarable matchingDeclarable = null;
-        for (Map.Entry<Declarable, Properties> oldEntry : this.declarablePropertiesMap.entrySet()) {
-          if (clazz.getName().equals(oldEntry.getKey().getClass().getName()) && (newEntry.getValue()
-              .equals(oldEntry.getValue())
-              || ((newEntry.getKey() instanceof Identifiable) && (((Identifiable) oldEntry.getKey())
-                  .getId().equals(((Identifiable) newEntry.getKey()).getId()))))) {
+        for (Entry<Declarable, Properties> oldEntry : this.declarablePropertiesMap.entrySet()) {
+          boolean isKeyClassSame = clazz.getName().equals(oldEntry.getKey().getClass().getName());
+          boolean isValueEqual = newEntry.getValue().equals(oldEntry.getValue());
+          boolean isKeyIdentifiableAndSameId =
+              Identifiable.class.isInstance(newEntry.getKey()) && ((Identifiable) oldEntry.getKey())
+                  .getId().equals(((Identifiable) newEntry.getKey()).getId());
+          if (isKeyClassSame && (isValueEqual || isKeyIdentifiableAndSameId)) {
             matchingDeclarable = oldEntry.getKey();
             break;
           }
@@ -5138,7 +5145,7 @@ public class GemFireCacheImpl
   }
 
   DiskStoreMonitor getDiskStoreMonitor() {
-    return diskMonitor;
+    return this.diskMonitor;
   }
 
   /**


[4/6] geode git commit: Safe refactorings

Posted by kl...@apache.org.
Safe refactorings


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c5b8cbe8
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c5b8cbe8
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c5b8cbe8

Branch: refs/heads/feature/GEODE-2632-6-1
Commit: c5b8cbe83bb48de0c20886a21e0106716ae6665f
Parents: 53f5c96
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Apr 24 10:38:37 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Apr 25 11:03:35 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/CancelCriterion.java  |   23 +-
 .../main/java/org/apache/geode/cache/Cache.java |  132 +-
 .../geode/cache/client/internal/ProxyCache.java |   18 +-
 .../cache/query/internal/QueryMonitor.java      |    6 +-
 .../apache/geode/distributed/internal/DM.java   |    5 +-
 .../internal/DistributionManager.java           |    2 +-
 .../internal/InternalDistributedSystem.java     |    7 +-
 .../internal/LonerDistributionManager.java      |    5 +
 .../geode/internal/cache/DistTXState.java       |    6 +-
 .../geode/internal/cache/GemFireCacheImpl.java  | 2240 ++++++++----------
 .../geode/internal/cache/InternalCache.java     |    4 +-
 .../geode/internal/cache/LocalRegion.java       |    4 +-
 .../util/FindRestEnabledServersFunction.java    |    8 +-
 .../persistence/PersistenceAdvisorImpl.java     |    8 +-
 .../internal/cache/xmlcache/CacheCreation.java  |    2 +-
 .../internal/beans/MemberMBeanBridge.java       |   13 +-
 .../test/java/org/apache/geode/TXJUnitTest.java |  844 ++++---
 .../java/org/apache/geode/TXWriterTestCase.java |   67 +-
 ...esourceManagerWithQueryMonitorDUnitTest.java |   11 +-
 .../NewDeclarativeIndexCreationJUnitTest.java   |  185 +-
 .../geode/disttx/DistTXDebugDUnitTest.java      |   98 +-
 .../apache/geode/disttx/DistTXJUnitTest.java    |   37 +-
 .../disttx/DistTXPersistentDebugDUnitTest.java  |   15 +-
 .../geode/disttx/DistTXWriterJUnitTest.java     |   37 +-
 .../geode/disttx/DistTXWriterOOMEJUnitTest.java |   39 +-
 .../disttx/DistributedTransactionDUnitTest.java |    9 +-
 .../apache/geode/disttx/PRDistTXJUnitTest.java  |   25 +-
 .../geode/internal/cache/PRTXJUnitTest.java     |  106 +-
 .../ParallelQueueRemovalMessageJUnitTest.java   |  104 +-
 .../cache/query/internal/cq/CqServiceImpl.java  |    2 +-
 .../query/dunit/QueryMonitorDUnitTest.java      |   20 +-
 .../web/controllers/CommonCrudController.java   |    4 +-
 32 files changed, 1895 insertions(+), 2191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/CancelCriterion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/CancelCriterion.java b/geode-core/src/main/java/org/apache/geode/CancelCriterion.java
index e4f9a41..fec3827 100644
--- a/geode-core/src/main/java/org/apache/geode/CancelCriterion.java
+++ b/geode-core/src/main/java/org/apache/geode/CancelCriterion.java
@@ -22,20 +22,20 @@ package org.apache.geode;
  * 
  * Code inside the service can check to see if the service is cancelled by calling
  * {@link #checkCancelInProgress(Throwable)}. Generally the pattern is to check before performing an
- * operation, check if the service is canceled before propgrating an exception futher up the stack,
- * and check for cancelation inside a long loop. Eg.
+ * operation, check if the service is canceled before propagating an exception further up the stack,
+ * and check for cancellation inside a long loop. Eg.
  * 
- * <code>
- * while(true) {
+ * <pre>
+ * while (true) {
  *   c.checkCancelInProgress(null);
  *   try {
- *      dispatchEvents();
- *   } catch(IOException e) {
+ *     dispatchEvents();
+ *   } catch (IOException e) {
  *     c.checkCancelInProgress(e);
  *     throw e;
  *   }
  * }
- * </code>
+ * </pre>
  * 
  * @see CancelException
  * @since GemFire 5.1
@@ -51,10 +51,6 @@ public abstract class CancelCriterion {
    *         exception indicating the service is shut down.
    */
   public abstract String cancelInProgress();
-  // import org.apache.geode.distributed.internal.DistributionManager;
-  // * <p>
-  // * In particular, a {@link DistributionManager} returns a non-null result if
-  // * message distribution has been terminated.
 
   /**
    * Use this utility function in your implementation of cancelInProgress() and cancelled() to
@@ -95,11 +91,11 @@ public abstract class CancelCriterion {
    * This method should wrap the exception in a service specific CancelationException (eg
    * CacheClosedException). or return null if the service is not being canceled.
    * 
-   * @param e an underlying exception, if any
+   * @param throwable an underlying exception, if any
    * @return RuntimeException to be thrown by checkCancelInProgress(), null if the receiver has not
    *         been cancelled.
    */
-  abstract public RuntimeException generateCancelledException(Throwable e);
+  public abstract RuntimeException generateCancelledException(Throwable throwable);
 
   /**
    * Checks to see if a cancellation is in progress. This is equivalent to the expression
@@ -111,5 +107,4 @@ public abstract class CancelCriterion {
     return cancelInProgress() != null;
   }
 
-
 }
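
The reworked Javadoc above spells out the usage pattern: check before an operation, check before
propagating an exception, and check inside long loops. For context, a minimal sketch of a
service-specific criterion following that pattern (hypothetical "MyService" names, not part of
this commit) might look like:

    class MyServiceStopper extends org.apache.geode.CancelCriterion {
      private volatile boolean shutDown; // set by the owning service when it closes

      @Override
      public String cancelInProgress() {
        // a non-null return value means "cancelled", as documented above
        return this.shutDown ? "MyService has been shut down" : null;
      }

      @Override
      public RuntimeException generateCancelledException(Throwable throwable) {
        String reason = cancelInProgress();
        if (reason == null) {
          return null;
        }
        // wrap in a service-specific cancellation exception, e.g. CacheClosedException
        return new org.apache.geode.cache.CacheClosedException(reason, throwable);
      }
    }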

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/cache/Cache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/Cache.java b/geode-core/src/main/java/org/apache/geode/cache/Cache.java
index bc4aa19..66a3cd8 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/Cache.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/Cache.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.cache;
 
 import java.util.List;
@@ -34,7 +33,6 @@ import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.i18n.LogWriterI18n;
 
-
 /**
  * Caches are obtained from the {@link CacheFactory#create()} method. See {@link CacheFactory} for
  * common usage patterns for creating the cache instance.
@@ -43,17 +41,15 @@ import org.apache.geode.i18n.LogWriterI18n;
  * where to find other caches on the network and how to communicate with them. The system can also
  * specify a <a href="../distribution/DistributedSystem.html#cache-xml-file">"cache-xml-file"</a>
  * property which will cause this cache to be initialized with the contents of that file. The
- * contents must comply with the <code>"doc-files/cache8_0.dtd"</code> file and the top level
- * element must be a <code>cache</code> element.
+ * contents must comply with the {@code "doc-files/cache8_0.dtd"} file and the top level element
+ * must be a {@code cache} element.
  * <p>
  * When a cache will no longer be used it should be {@link #close() closed}. Once it
  * {@link #isClosed is closed} any attempt to use it or any {@link Region} obtained from it will
  * cause a {@link CacheClosedException} to be thrown.
- *
  * <p>
  * A cache can have multiple root regions, each with a different name.
  *
- *
  * @since GemFire 2.0
  */
 @SuppressWarnings("deprecation")
@@ -63,13 +59,13 @@ public interface Cache extends GemFireCache {
    * region in the cache. After this cache is closed, any further method call on this cache or any
    * region object will throw {@link CacheClosedException}, unless otherwise noted.
    * 
-   * @param keepalive whether the server should keep the durable client's queues alive for the
+   * @param keepAlive whether the server should keep the durable client's queues alive for the
    *        timeout period
    * @throws CacheClosedException if the cache is already closed.
    * @deprecated as of 6.5 use {@link ClientCache#close(boolean)} instead.
    */
   @Deprecated
-  public void close(boolean keepalive);
+  void close(boolean keepAlive);
 
   /**
    * Creates a VM region using the specified RegionAttributes.
@@ -88,7 +84,7 @@ public interface Cache extends GemFireCache {
    * @deprecated as of GemFire 5.0, use {@link #createRegion} instead.
    */
   @Deprecated
-  public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> aRegionAttributes)
+  <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> aRegionAttributes)
       throws RegionExistsException, TimeoutException;
 
   /**
@@ -109,7 +105,7 @@ public interface Cache extends GemFireCache {
    * @deprecated as of 6.5 use {@link #createRegionFactory(RegionAttributes)} instead
    */
   @Deprecated
-  public <K, V> Region<K, V> createRegion(String name, RegionAttributes<K, V> aRegionAttributes)
+  <K, V> Region<K, V> createRegion(String name, RegionAttributes<K, V> aRegionAttributes)
       throws RegionExistsException, TimeoutException;
 
   /**
@@ -119,7 +115,7 @@ public interface Cache extends GemFireCache {
    * @see #createRegionFactory(RegionShortcut)
    * @since GemFire 6.5
    */
-  public <K, V> RegionFactory<K, V> createRegionFactory();
+  <K, V> RegionFactory<K, V> createRegionFactory();
 
   /**
    * Creates a {@link RegionFactory} for the most commonly used {@link Region} types defined by
@@ -127,7 +123,7 @@ public interface Cache extends GemFireCache {
    * 
    * @since GemFire 6.5
    */
-  public <K, V> RegionFactory<K, V> createRegionFactory(RegionShortcut atts);
+  <K, V> RegionFactory<K, V> createRegionFactory(RegionShortcut shortcut);
 
   /**
    * Creates a {@link RegionFactory} for creating a {@link Region} from {@link RegionAttributes}
@@ -137,7 +133,7 @@ public interface Cache extends GemFireCache {
    * @see #setRegionAttributes(String, RegionAttributes)
    * @since GemFire 6.5
    */
-  public <K, V> RegionFactory<K, V> createRegionFactory(String regionAttributesId);
+  <K, V> RegionFactory<K, V> createRegionFactory(String regionAttributesId);
 
   /**
    * Creates a {@link RegionFactory} for creating a {@link Region} from the given regionAttributes
@@ -146,43 +142,43 @@ public interface Cache extends GemFireCache {
    * @see #createRegionFactory(RegionShortcut)
    * @since GemFire 6.5
    */
-  public <K, V> RegionFactory<K, V> createRegionFactory(RegionAttributes<K, V> regionAttributes);
+  <K, V> RegionFactory<K, V> createRegionFactory(RegionAttributes<K, V> regionAttributes);
 
   /**
    * Internal GemStone method for accessing the internationalized logging object for GemFire, use
-   * {@link #getLogger()} instead. This method does not throw <code>CacheClosedException</code> if
-   * the cache is closed.
+   * {@link #getLogger()} instead. This method does not throw {@code CacheClosedException} if the
+   * cache is closed.
    * 
    * @return the logging object
    * @deprecated as of 6.5 use getLogger().convertToLogWriterI18n() instead
    */
   @Deprecated
-  public LogWriterI18n getLoggerI18n();
+  LogWriterI18n getLoggerI18n();
 
   /**
    * Internal GemStone method for accessing the internationalized logging object for GemFire, use
-   * {@link #getSecurityLogger()} instead. This method does not throw
-   * <code>CacheClosedException</code> if the cache is closed.
+   * {@link #getSecurityLogger()} instead. This method does not throw {@code CacheClosedException}
+   * if the cache is closed.
    * 
    * @return the security logging object
    * @deprecated as of 6.5 use getSecurityLogger().convertToLogWriterI18n() instead
    */
   @Deprecated
-  public LogWriterI18n getSecurityLoggerI18n();
+  LogWriterI18n getSecurityLoggerI18n();
 
   /**
    * Gets the number of seconds a cache operation will wait to obtain a distributed lock lease. This
-   * method does not throw <code>CacheClosedException</code> if the cache is closed.
+   * method does not throw {@code CacheClosedException} if the cache is closed.
    */
-  public int getLockTimeout();
+  int getLockTimeout();
 
   /**
    * Sets the number of seconds a cache operation may wait to obtain a distributed lock lease before
    * timing out.
    *
-   * @throws IllegalArgumentException if <code>seconds</code> is less than zero
+   * @throws IllegalArgumentException if {@code seconds} is less than zero
    */
-  public void setLockTimeout(int seconds);
+  void setLockTimeout(int seconds);
 
   /**
    * Gets the frequency (in seconds) at which a message will be sent by the primary cache-server to
@@ -191,45 +187,45 @@ public interface Cache extends GemFireCache {
    * 
    * @return The time interval in seconds
    */
-  public int getMessageSyncInterval();
+  int getMessageSyncInterval();
 
   /**
    * Sets the frequency (in seconds) at which a message will be sent by the primary cache-server
    * node to all the secondary cache-server nodes to remove the events which have already been
    * dispatched from the queue.
    * 
-   * @param seconds - the time interval in seconds
-   * @throws IllegalArgumentException if <code>seconds</code> is less than zero
+   * @param seconds the time interval in seconds
+   * @throws IllegalArgumentException if {@code seconds} is less than zero
    */
-  public void setMessageSyncInterval(int seconds);
+  void setMessageSyncInterval(int seconds);
 
   /**
    * Gets the length, in seconds, of distributed lock leases obtained by this cache. This method
-   * does not throw <code>CacheClosedException</code> if the cache is closed.
+   * does not throw {@code CacheClosedException} if the cache is closed.
    */
-  public int getLockLease();
+  int getLockLease();
 
   /**
    * Sets the length, in seconds, of distributed lock leases obtained by this cache.
    *
-   * @throws IllegalArgumentException if <code>seconds</code> is less than zero.
+   * @throws IllegalArgumentException if {@code seconds} is less than zero.
    */
-  public void setLockLease(int seconds);
+  void setLockLease(int seconds);
 
   /**
    * Gets the number of seconds a cache {@link org.apache.geode.cache.Region#get(Object) get}
    * operation can spend searching for a value before it times out. The search includes any time
    * spent loading the object. When the search times out it causes the get to fail by throwing an
-   * exception. This method does not throw <code>CacheClosedException</code> if the cache is closed.
+   * exception. This method does not throw {@code CacheClosedException} if the cache is closed.
    */
-  public int getSearchTimeout();
+  int getSearchTimeout();
 
   /**
    * Sets the number of seconds a cache get operation can spend searching for a value.
    *
-   * @throws IllegalArgumentException if <code>seconds</code> is less than zero
+   * @throws IllegalArgumentException if {@code seconds} is less than zero
    */
-  public void setSearchTimeout(int seconds);
+  void setSearchTimeout(int seconds);
 
   /**
    * Creates a new cache server, with the default configuration, that will allow clients to access
@@ -242,17 +238,17 @@ public interface Cache extends GemFireCache {
    *
    * @since GemFire 5.7
    */
-  public CacheServer addCacheServer();
+  CacheServer addCacheServer();
 
   /**
-   * Returns a collection of all of the <code>CacheServer</code>s that can serve the contents of
-   * this <code>Cache</code> to clients.
+   * Returns a collection of all of the {@code CacheServer}s that can serve the contents of this
+   * {@code Cache} to clients.
    *
    * @see #addCacheServer
    *
    * @since GemFire 5.7
    */
-  public List<CacheServer> getCacheServers();
+  List<CacheServer> getCacheServers();
 
   /**
    * Adds a gateway event conflict resolution resolver. This is invoked if an event is processed
@@ -262,27 +258,27 @@ public interface Cache extends GemFireCache {
    * event's distributed system ID is larger than that of the last event to modify the affected
    * entry.
    * 
-   * @param resolver
+   * @param resolver gateway event conflict resolution resolver
    * @since GemFire 7.0
    */
-  public void setGatewayConflictResolver(GatewayConflictResolver resolver);
+  void setGatewayConflictResolver(GatewayConflictResolver resolver);
 
   /**
    * Returns the current gateway event conflict resolver
    * 
    * @since GemFire 7.0
    */
-  public GatewayConflictResolver getGatewayConflictResolver();
+  GatewayConflictResolver getGatewayConflictResolver();
 
   /**
-   * Sets whether or not this <code>Cache</code> resides in a long-running "cache server" VM. A
-   * cache server may be an application VM or may be a stand-along VM launched using
+   * Sets whether or not this {@code Cache} resides in a long-running "cache server" VM. A cache
+   * server may be an application VM or may be a stand-along VM launched using
    * {@linkplain org.apache.geode.admin.AdminDistributedSystem#addCacheServer administration API} or
-   * the <code>cacheserver</code> command line utility.
+   * the {@code cacheserver} command line utility.
    *
    * @since GemFire 4.0
    */
-  public void setIsServer(boolean isServer);
+  void setIsServer(boolean isServer);
 
   /**
    * Returns whether or not this cache resides in a "cache server" VM.
@@ -291,7 +287,7 @@ public interface Cache extends GemFireCache {
    *
    * @since GemFire 4.0
    */
-  public boolean isServer();
+  boolean isServer();
 
   /**
    * Notifies the server that this client is ready to receive updates. This method is used by
@@ -309,7 +305,7 @@ public interface Cache extends GemFireCache {
    * @deprecated as of 6.5 use {@link ClientCache#readyForEvents} instead.
    */
   @Deprecated
-  public void readyForEvents();
+  void readyForEvents();
 
   /**
    * Creates {@link GatewaySenderFactory} for creating a SerialGatewaySender
@@ -317,7 +313,8 @@ public interface Cache extends GemFireCache {
    * @return SerialGatewaySenderFactory
    * @since GemFire 7.0
    */
-  public GatewaySenderFactory createGatewaySenderFactory();
+  @Override
+  GatewaySenderFactory createGatewaySenderFactory();
 
   /**
    * Creates {@link AsyncEventQueueFactory} for creating a AsyncEventQueue
@@ -325,7 +322,7 @@ public interface Cache extends GemFireCache {
    * @return AsyncEventQueueFactory
    * @since GemFire 7.0
    */
-  public AsyncEventQueueFactory createAsyncEventQueueFactory();
+  AsyncEventQueueFactory createAsyncEventQueueFactory();
 
   /**
    * Creates {@link GatewayReceiverFactory} for creating a GatewayReceiver
@@ -333,7 +330,7 @@ public interface Cache extends GemFireCache {
    * @return GatewayReceiverFactory
    * @since GemFire 7.0
    */
-  public GatewayReceiverFactory createGatewayReceiverFactory();
+  GatewayReceiverFactory createGatewayReceiverFactory();
 
   /**
    * Returns all {@link GatewaySender}s for this Cache.
@@ -341,7 +338,7 @@ public interface Cache extends GemFireCache {
    * @return Set of GatewaySenders
    * @since GemFire 7.0
    */
-  public Set<GatewaySender> getGatewaySenders();
+  Set<GatewaySender> getGatewaySenders();
 
   /**
    * Returns the {@link GatewaySender} with the given id added to this Cache.
@@ -349,7 +346,7 @@ public interface Cache extends GemFireCache {
    * @return GatewaySender with id
    * @since GemFire 7.0
    */
-  public GatewaySender getGatewaySender(String id);
+  GatewaySender getGatewaySender(String id);
 
   /**
    * Returns all {@link GatewayReceiver}s for this Cache
@@ -357,7 +354,7 @@ public interface Cache extends GemFireCache {
    * @return Set of GatewaySenders
    * @since GemFire 7.0
    */
-  public Set<GatewayReceiver> getGatewayReceivers();
+  Set<GatewayReceiver> getGatewayReceivers();
 
   /**
    * Returns all {@link AsyncEventQueue}s for this Cache
@@ -365,7 +362,7 @@ public interface Cache extends GemFireCache {
    * @return Set of AsyncEventQueue
    * @since GemFire 7.0
    */
-  public Set<AsyncEventQueue> getAsyncEventQueues();
+  Set<AsyncEventQueue> getAsyncEventQueues();
 
   /**
    * Returns the {@link AsyncEventQueue} with the given id added to this Cache.
@@ -373,37 +370,37 @@ public interface Cache extends GemFireCache {
    * @return AsyncEventQueue with id
    * @since GemFire 7.0
    */
-  public AsyncEventQueue getAsyncEventQueue(String id);
+  AsyncEventQueue getAsyncEventQueue(String id);
 
   /**
    * Returns a set of the other non-administrative members in the distributed system.
    * 
    * @since GemFire 6.6
    */
-  public Set<DistributedMember> getMembers();
+  Set<DistributedMember> getMembers();
 
   /**
    * Returns a set of the administrative members in the distributed system.
    * 
    * @since GemFire 6.6
    */
-  public Set<DistributedMember> getAdminMembers();
+  Set<DistributedMember> getAdminMembers();
 
   /**
    * Returns a set of the members in the distributed system that have the given region. For regions
    * with local scope an empty set is returned.
    * 
-   * @param r a Region in the cache
+   * @param region a Region in the cache
    * @since GemFire 6.6
    */
-  public Set<DistributedMember> getMembers(Region r);
+  Set<DistributedMember> getMembers(Region region);
 
   /**
    * Obtains the snapshot service to allow the cache data to be imported or exported.
    * 
    * @return the snapshot service
    */
-  public CacheSnapshotService getSnapshotService();
+  CacheSnapshotService getSnapshotService();
 
   /**
    * Test to see whether the Cache is in the process of reconnecting and recreating a new cache
@@ -415,30 +412,29 @@ public interface Cache extends GemFireCache {
    * 
    * @return true if the Cache is attempting to reconnect or has finished reconnecting
    */
-  public boolean isReconnecting();
+  boolean isReconnecting();
 
   /**
    * Wait for the Cache to finish reconnecting to the distributed system and recreate a new Cache.
    * 
    * @see #getReconnectedCache
    * @param time amount of time to wait, or -1 to wait forever
-   * @param units
+   * @param units time unit
    * @return true if the cache was reconnected
    * @throws InterruptedException if the thread is interrupted while waiting
    */
-  public boolean waitUntilReconnected(long time, TimeUnit units) throws InterruptedException;
+  boolean waitUntilReconnected(long time, TimeUnit units) throws InterruptedException;
 
   /**
    * Force the Cache to stop reconnecting. If the Cache is currently connected this will disconnect
    * and close it.
    * 
    */
-  public void stopReconnecting();
+  void stopReconnecting();
 
   /**
    * Returns the new Cache if there was an auto-reconnect and the cache was recreated.
    */
-  public Cache getReconnectedCache();
+  Cache getReconnectedCache();
 
 }
-
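
The Cache.java changes above are also mechanical: redundant "public" modifiers are dropped
(interface members are implicitly public), <code> tags become {@code}, and parameter names are
made descriptive, so callers are unaffected. As a hedged reference point, a minimal client of the
unchanged API (hypothetical example, not part of this commit):

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.RegionShortcut;

    public class CacheApiExample {
      public static void main(String[] args) {
        // loner member, same "mcast-port=0" convention used by the tests in this commit
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region =
            cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("example");
        region.put("key", "value");
        cache.close();
      }
    }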

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/cache/client/internal/ProxyCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ProxyCache.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ProxyCache.java
index 76306f5..f4a8d5b 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ProxyCache.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ProxyCache.java
@@ -221,22 +221,16 @@ public class ProxyCache implements RegionService {
     return this.stopper;
   }
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.geode.cache.RegionService#rootRegions()
-   */
+  @Override
   public Set<Region<?, ?>> rootRegions() {
     preOp();
-    Set<Region<?, ?>> rRegions = new HashSet<Region<?, ?>>();
-    Iterator<LocalRegion> it = this.cache.rootRegions().iterator();
-    while (it.hasNext()) {
-      LocalRegion lr = it.next();
-      if (!lr.getAttributes().getDataPolicy().withStorage()) {
-        rRegions.add(new ProxyRegion(this, lr));
+    Set<Region<?, ?>> rootRegions = new HashSet<>();
+    for (Region<?, ?> region : this.cache.rootRegions()) {
+      if (!region.getAttributes().getDataPolicy().withStorage()) {
+        rootRegions.add(new ProxyRegion(this, region));
       }
     }
-    return Collections.unmodifiableSet(rRegions);
+    return Collections.unmodifiableSet(rootRegions);
   }
 
   public PdxInstanceFactory createPdxInstanceFactory(String className) {
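
The rootRegions() rewrite above is behavior-preserving: an explicit Iterator loop becomes an
enhanced for-loop and the raw HashSet gains the diamond operator. A hedged, stand-alone sketch of
the same idiom (hypothetical Widget type, not Geode API):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class WidgetFilter {
      interface Widget {
        boolean isVisible();

        String getName();
      }

      static Set<String> visibleNames(Set<? extends Widget> source) {
        Set<String> names = new HashSet<>(); // diamond instead of new HashSet<String>()
        for (Widget widget : source) { // enhanced for-loop instead of an explicit Iterator
          if (widget.isVisible()) {
            names.add(widget.getName());
          }
        }
        return Collections.unmodifiableSet(names); // callers get a read-only view
      }
    }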

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryMonitor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryMonitor.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryMonitor.java
index d6acfbf..569fbb0 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryMonitor.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryMonitor.java
@@ -103,7 +103,7 @@ public class QueryMonitor implements Runnable {
 
     /** For dunit test purpose */
     if (GemFireCacheImpl.getInstance() != null
-        && GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME > 0) {
+        && GemFireCacheImpl.getInstance().testMaxQueryExecutionTime > 0) {
       if (this.queryMonitorTasks == null) {
         this.queryMonitorTasks = new ConcurrentHashMap();
       }
@@ -127,8 +127,8 @@ public class QueryMonitor implements Runnable {
 
       // START - DUnit Test purpose.
       if (GemFireCacheImpl.getInstance() != null
-          && GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME > 0) {
-        long maxTimeSet = GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME;
+          && GemFireCacheImpl.getInstance().testMaxQueryExecutionTime > 0) {
+        long maxTimeSet = GemFireCacheImpl.getInstance().testMaxQueryExecutionTime;
         QueryThreadTask queryTask = (QueryThreadTask) queryThreads.peek();
 
         long currentTime = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/distributed/internal/DM.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DM.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DM.java
index 328a4f8..afc8125 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DM.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DM.java
@@ -33,9 +33,6 @@ import org.apache.geode.internal.Version;
 
 /**
  * This interface defines the services provided by any class that is a distribution manager.
- *
- *
- *
  */
 public interface DM extends ReplySender {
 
@@ -463,4 +460,6 @@ public interface DM extends ReplySender {
    * forceUDPMessagingForCurrentThread.
    */
   public void releaseUDPMessagingForCurrentThread();
+
+  int getDMType();
 }
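
Promoting getDMType() onto the DM interface lets callers ask for the member type without first
casting to DistributionManager; the GemFireCacheImpl hunk further down replaces an instanceof
check with exactly this call, and LonerDistributionManager returns 0. A hedged sketch of a caller
(hypothetical helper, not part of this commit):

    import org.apache.geode.distributed.internal.DM;
    import org.apache.geode.distributed.internal.DistributionManager;

    final class AdminVmGuard {
      static void checkNotAdminOnly(DM dm) {
        // mirrors the admin-only check made in the GemFireCacheImpl constructor below
        if (dm.getDMType() == DistributionManager.ADMIN_ONLY_DM_TYPE) {
          throw new IllegalStateException("cannot create a cache in an admin-only VM");
        }
      }
    }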

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java
index 2ae86e6..6920311 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java
@@ -1756,7 +1756,7 @@ public class DistributionManager implements DM {
   }
 
   /**
-   * Add a membership listener and return other DistribtionManagerIds as an atomic operation
+   * Add a membership listener and return other DistributionManagerIds as an atomic operation
    */
   public Set addMembershipListenerAndGetDistributionManagerIds(MembershipListener l) {
     // switched sync order to fix bug 30360

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
index 987e491..86bc7a4 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
@@ -117,6 +117,11 @@ import org.apache.geode.security.GemFireSecurityException;
 public class InternalDistributedSystem extends DistributedSystem
     implements OsStatisticsFactory, StatisticsManager {
 
+  /**
+   * True if the user is allowed lock when memory resources appear to be overcommitted.
+   */
+  private static final boolean ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED =
+      Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "Cache.ALLOW_MEMORY_OVERCOMMIT");
   private static final Logger logger = LogService.getLogger();
 
   public static final String DISABLE_MANAGEMENT_PROPERTY =
@@ -654,7 +659,7 @@ public class InternalDistributedSystem extends DistributedSystem
         long avail = LinuxProcFsStatistics.getAvailableMemory(logger);
         long size = offHeapMemorySize + Runtime.getRuntime().totalMemory();
         if (avail < size) {
-          if (GemFireCacheImpl.ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED) {
+          if (ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED) {
             logger.warn(LocalizedMessage.create(
                 LocalizedStrings.InternalDistributedSystem_MEMORY_OVERCOMMIT_WARN, size - avail));
           } else {
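
The ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED flag moves here from GemFireCacheImpl unchanged; as
before it is driven by a JVM system property. A hedged sketch of the pattern (the property name
matches the prefix used above, the surrounding class is hypothetical):

    public class OvercommitFlagExample {
      // DistributionConfig.GEMFIRE_PREFIX is "gemfire.", so the full name is
      // gemfire.Cache.ALLOW_MEMORY_OVERCOMMIT; enable with -Dgemfire.Cache.ALLOW_MEMORY_OVERCOMMIT=true
      private static final boolean ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED =
          Boolean.getBoolean("gemfire.Cache.ALLOW_MEMORY_OVERCOMMIT");

      public static void main(String[] args) {
        System.out.println("allow memory overcommit = " + ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED);
      }
    }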

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java
index af4e674..e9068e6 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java
@@ -1357,6 +1357,11 @@ public class LonerDistributionManager implements DM {
   }
 
   @Override
+  public int getDMType() {
+    return 0;
+  }
+
+  @Override
   public boolean isSharedConfigurationServiceEnabledForDS() {
     // return false for loner
     return false;

http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXState.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXState.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXState.java
index 6df2623..226ffa6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXState.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXState.java
@@ -51,6 +51,8 @@ import org.apache.geode.internal.offheap.annotations.Released;
  */
 public class DistTXState extends TXState {
 
+  public static Runnable internalBeforeApplyChanges;
+  public static Runnable internalBeforeNonTXBasicPut;
   private boolean updatingTxStateDuringPreCommit = false;
 
   public DistTXState(TXStateProxy proxy, boolean onBehalfOfRemoteStub) {
@@ -263,8 +265,8 @@ public class DistTXState extends TXState {
       try {
         attachFilterProfileInformation(entries);
 
-        if (GemFireCacheImpl.internalBeforeApplyChanges != null) {
-          GemFireCacheImpl.internalBeforeApplyChanges.run();
+        if (internalBeforeApplyChanges != null) {
+          internalBeforeApplyChanges.run();
         }
 
         // apply changes to the cache
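
The static test hooks internalBeforeApplyChanges and internalBeforeNonTXBasicPut move from
GemFireCacheImpl into DistTXState, next to the code that runs them. A hedged sketch of how a test
might drive such a hook after this change (hypothetical test body, not from this commit):

    // install the hook before running the distributed-transaction scenario under test
    DistTXState.internalBeforeApplyChanges = () -> {
      System.out.println("about to apply transactional changes"); // or a latch / fault injection
    };
    try {
      // ... execute the transactional operation being tested ...
    } finally {
      DistTXState.internalBeforeApplyChanges = null; // always clear static hooks between tests
    }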


[3/6] geode git commit: Safe refactorings

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5b8cbe8/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index 56243e1..29e9f95 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -12,12 +12,69 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.internal.cache;
 
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.Reader;
+import java.io.StringBufferInputStream;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.naming.Context;
+import javax.transaction.TransactionManager;
+
 import com.sun.jna.Native;
 import com.sun.jna.Platform;
 import org.apache.commons.lang.StringUtils;
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.CancelException;
 import org.apache.geode.ForcedDisconnectException;
@@ -89,6 +146,7 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.DistributedSystemDisconnectedException;
 import org.apache.geode.distributed.Locator;
 import org.apache.geode.distributed.internal.CacheTime;
+import org.apache.geode.distributed.internal.ClusterConfigurationService;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.DistributionAdvisee;
 import org.apache.geode.distributed.internal.DistributionAdvisor;
@@ -103,7 +161,6 @@ import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.distributed.internal.ResourceEvent;
 import org.apache.geode.distributed.internal.ResourceEventsListener;
 import org.apache.geode.distributed.internal.ServerLocation;
-import org.apache.geode.distributed.internal.ClusterConfigurationService;
 import org.apache.geode.distributed.internal.locks.DLockService;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.i18n.LogWriterI18n;
@@ -125,7 +182,6 @@ import org.apache.geode.internal.cache.partitioned.RedundancyAlreadyMetException
 import org.apache.geode.internal.cache.persistence.BackupManager;
 import org.apache.geode.internal.cache.persistence.PersistentMemberID;
 import org.apache.geode.internal.cache.persistence.PersistentMemberManager;
-import org.apache.geode.internal.cache.persistence.query.TemporaryResultSetFactory;
 import org.apache.geode.internal.cache.snapshot.CacheSnapshotServiceImpl;
 import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl;
 import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier;
@@ -173,61 +229,6 @@ import org.apache.geode.pdx.internal.PdxInstanceFactoryImpl;
 import org.apache.geode.pdx.internal.PdxInstanceImpl;
 import org.apache.geode.pdx.internal.TypeRegistry;
 import org.apache.geode.redis.GeodeRedisServer;
-import org.apache.logging.log4j.Logger;
-
-import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.PrintStream;
-import java.io.Reader;
-import java.io.StringBufferInputStream;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.ServiceLoader;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import javax.naming.Context;
 
 // TODO: somebody Come up with more reasonable values for {@link #DEFAULT_LOCK_TIMEOUT}, etc.
 /**
@@ -238,23 +239,22 @@ public class GemFireCacheImpl
     implements InternalCache, ClientCache, HasCachePerfStats, DistributionAdvisee, CacheTime {
   private static final Logger logger = LogService.getLogger();
 
-  // moved *SERIAL_NUMBER stuff to DistributionAdvisor
-
   /** The default number of seconds to wait for a distributed lock */
-  public static final int DEFAULT_LOCK_TIMEOUT = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultLockTimeout", 60).intValue();
+  public static final int DEFAULT_LOCK_TIMEOUT =
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultLockTimeout", 60);
 
   /**
    * The default duration (in seconds) of a lease on a distributed lock
    */
-  public static final int DEFAULT_LOCK_LEASE = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultLockLease", 120).intValue();
+  public static final int DEFAULT_LOCK_LEASE =
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultLockLease", 120);
 
   /** The default "copy on read" attribute value */
   public static final boolean DEFAULT_COPY_ON_READ = false;
 
   /** the last instance of GemFireCache created */
   private static volatile GemFireCacheImpl instance = null;
+
   /**
    * Just like instance but is valid for a bit longer so that pdx can still find the cache during a
    * close.
@@ -262,16 +262,15 @@ public class GemFireCacheImpl
   private static volatile GemFireCacheImpl pdxInstance = null;
 
   /**
-   * The default amount of time to wait for a <code>netSearch</code> to complete
+   * The default amount of time to wait for a {@code netSearch} to complete
    */
-  public static final int DEFAULT_SEARCH_TIMEOUT = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultSearchTimeout", 300).intValue();
+  public static final int DEFAULT_SEARCH_TIMEOUT =
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.defaultSearchTimeout", 300);
 
   /**
-   * The <code>CacheLifecycleListener</code> s that have been registered in this VM
+   * The {@code CacheLifecycleListener} s that have been registered in this VM
    */
-  private static final Set<CacheLifecycleListener> cacheLifecycleListeners =
-      new HashSet<CacheLifecycleListener>();
+  private static final Set<CacheLifecycleListener> cacheLifecycleListeners = new HashSet<>();
 
   /**
    * Define gemfire.Cache.ASYNC_EVENT_LISTENERS=true to invoke event listeners in the background
@@ -284,80 +283,63 @@ public class GemFireCacheImpl
    * (the default) then the size of the entry value is unchanged by a delta application. Not a final
    * so that tests can change this value.
    *
-   * @since GemFire hitachi 6.1.2.9
+   * @since GemFire h****** 6.1.2.9
    */
-  public static boolean DELTAS_RECALCULATE_SIZE =
+  static boolean DELTAS_RECALCULATE_SIZE =
       Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "DELTAS_RECALCULATE_SIZE");
 
-  public static final int EVENT_QUEUE_LIMIT = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.EVENT_QUEUE_LIMIT", 4096).intValue();
-  public static final int EVENT_THREAD_LIMIT = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.EVENT_THREAD_LIMIT", 16).intValue();
+  private static final int EVENT_QUEUE_LIMIT =
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.EVENT_QUEUE_LIMIT", 4096);
+
+  static final int EVENT_THREAD_LIMIT =
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.EVENT_THREAD_LIMIT", 16);
 
   /**
    * System property to limit the max query-execution time. By default its turned off (-1), the time
-   * is set in MiliSecs.
+   * is set in milliseconds.
    */
   public static final int MAX_QUERY_EXECUTION_TIME =
-      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.MAX_QUERY_EXECUTION_TIME", -1)
-          .intValue();
+      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "Cache.MAX_QUERY_EXECUTION_TIME", -1);
 
   /**
    * System property to disable query monitor even if resource manager is in use
    */
-  public final boolean QUERY_MONITOR_DISABLED_FOR_LOW_MEM = Boolean
+  private final boolean queryMonitorDisabledForLowMem = Boolean
       .getBoolean(DistributionConfig.GEMFIRE_PREFIX + "Cache.DISABLE_QUERY_MONITOR_FOR_LOW_MEMORY");
 
   /**
    * Property set to true if resource manager heap percentage is set and query monitor is required
    */
-  public static Boolean QUERY_MONITOR_REQUIRED_FOR_RESOURCE_MANAGER = Boolean.FALSE;
-
-  /**
-   * This property defines internal function that will get executed on each node to fetch active
-   * REST service endpoints (servers).
-   */
-  public static final String FIND_REST_ENABLED_SERVERS_FUNCTION_ID =
-      FindRestEnabledServersFunction.class.getName();
-
-  /**
-   * True if the user is allowed lock when memory resources appear to be overcommitted.
-   */
-  public static final boolean ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED =
-      Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "Cache.ALLOW_MEMORY_OVERCOMMIT");
+  private static boolean queryMonitorRequiredForResourceManager = false;
 
-  // time in ms
+  /** time in milliseconds */
   private static final int FIVE_HOURS = 5 * 60 * 60 * 1000;
-  /** To test MAX_QUERY_EXECUTION_TIME option. */
-  public int TEST_MAX_QUERY_EXECUTION_TIME = -1;
-  public boolean TEST_MAX_QUERY_EXECUTION_TIME_OVERRIDE_EXCEPTION = false;
 
-  // ///////////////////// Instance Fields ///////////////////////
+  /** To test MAX_QUERY_EXECUTION_TIME option. */
+  public int testMaxQueryExecutionTime = -1;
 
   private final InternalDistributedSystem system;
 
   private final DM dm;
 
-  // This is a HashMap because I know that clear() on it does
-  // not allocate objects.
-  private final HashMap rootRegions;
+  private final Map<String, LocalRegion> rootRegions;
 
   /**
    * True if this cache is being created by a ClientCacheFactory.
    */
   private final boolean isClient;
-  private PoolFactory clientpf;
+
+  private PoolFactory poolFactory;
+
   /**
    * It is not final to allow cache.xml parsing to set it.
    */
   private Pool defaultPool;
 
-  private final ConcurrentMap pathToRegion = new ConcurrentHashMap();
+  private final ConcurrentMap<String, Region<?, ?>> pathToRegion = new ConcurrentHashMap<>();
 
-  protected volatile boolean isInitialized = false;
-  protected volatile boolean isClosing = false;
-  protected volatile boolean closingGatewaySendersByShutdownAll = false;
-  protected volatile boolean closingGatewayReceiversByShutdownAll = false;
+  private volatile boolean isInitialized = false;
+  volatile boolean isClosing = false;
 
   /** Amount of time (in seconds) to wait for a distributed lock */
   private int lockTimeout = DEFAULT_LOCK_TIMEOUT;
@@ -365,7 +347,7 @@ public class GemFireCacheImpl
   /** Amount of time a lease of a distributed lock lasts */
   private int lockLease = DEFAULT_LOCK_LEASE;
 
-  /** Amount of time to wait for a <code>netSearch</code> to complete */
+  /** Amount of time to wait for a {@code netSearch} to complete */
   private int searchTimeout = DEFAULT_SEARCH_TIMEOUT;
 
   private final CachePerfStats cachePerfStats;
@@ -381,14 +363,14 @@ public class GemFireCacheImpl
    * retrieval operations. It is assumed that the traversal operations on cache servers list vastly
    * outnumber the mutative operations such as add, remove.
    */
-  private volatile List allCacheServers = new CopyOnWriteArrayList();
+  private final List<CacheServerImpl> allCacheServers = new CopyOnWriteArrayList<>();
 
   /**
    * Controls updates to the list of all gateway senders
    *
    * @see #allGatewaySenders
    */
-  public final Object allGatewaySendersLock = new Object();
+  private final Object allGatewaySendersLock = new Object();
 
   /**
    * the set of all gateway senders. It may be fetched safely (for enumeration), but updates must by
@@ -400,22 +382,20 @@ public class GemFireCacheImpl
    * The list of all async event queues added to the cache. CopyOnWriteArrayList is used to allow
    * concurrent add, remove and retrieval operations.
    */
-  private volatile Set<AsyncEventQueue> allVisibleAsyncEventQueues =
-      new CopyOnWriteArraySet<AsyncEventQueue>();
+  private final Set<AsyncEventQueue> allVisibleAsyncEventQueues = new CopyOnWriteArraySet<>();
 
   /**
    * The list of all async event queues added to the cache. CopyOnWriteArrayList is used to allow
    * concurrent add, remove and retrieval operations.
    */
-  private volatile Set<AsyncEventQueue> allAsyncEventQueues =
-      new CopyOnWriteArraySet<AsyncEventQueue>();
+  private final Set<AsyncEventQueue> allAsyncEventQueues = new CopyOnWriteArraySet<>();
 
   /**
    * Controls updates to the list of all gateway receivers
    *
    * @see #allGatewayReceivers
    */
-  public final Object allGatewayReceiversLock = new Object();
+  private final Object allGatewayReceiversLock = new Object();
 
   /**
    * the list of all gateway Receivers. It may be fetched safely (for enumeration), but updates must
@@ -423,10 +403,10 @@ public class GemFireCacheImpl
    */
   private volatile Set<GatewayReceiver> allGatewayReceivers = Collections.emptySet();
 
-  /** PartitionedRegion instances (for required-events notification */
-  // This is a HashSet because I know that clear() on it does not
-  // allocate any objects.
-  private final HashSet<PartitionedRegion> partitionedRegions = new HashSet<PartitionedRegion>();
+  /**
+   * PartitionedRegion instances (for required-events notification
+   */
+  private final Set<PartitionedRegion> partitionedRegions = new HashSet<>();
 
   /**
    * Fix for 42051 This is a map of regions that are in the process of being destroyed. We could
@@ -436,14 +416,14 @@ public class GemFireCacheImpl
    * that ID if it receives it as part of the persistent view.
    */
   private final ConcurrentMap<String, DistributedRegion> regionsInDestroy =
-      new ConcurrentHashMap<String, DistributedRegion>();
+      new ConcurrentHashMap<>();
 
-  public final Object allGatewayHubsLock = new Object();
+  private final Object allGatewayHubsLock = new Object();
 
   /**
    * conflict resolver for WAN, if any
    * 
-   * @guarded.By {@link #allGatewayHubsLock}
+   * GuardedBy {@link #allGatewayHubsLock}
    */
   private GatewayConflictResolver gatewayConflictResolver;
 
@@ -451,7 +431,7 @@ public class GemFireCacheImpl
   private boolean isServer = false;
 
   /** transaction manager for this cache */
-  private final TXManagerImpl txMgr;
+  private final TXManagerImpl transactionManager;
 
   private RestAgent restAgent;
 
@@ -461,7 +441,8 @@ public class GemFireCacheImpl
   private volatile boolean copyOnRead = DEFAULT_COPY_ON_READ;
 
   /** The named region attributes registered with this cache. */
-  private final Map namedRegionAttributes = Collections.synchronizedMap(new HashMap());
+  private final Map<String, RegionAttributes<?, ?>> namedRegionAttributes =
+      Collections.synchronizedMap(new HashMap<>());
 
   /**
    * if this cache was forced to close due to a forced-disconnect, we retain a
@@ -473,24 +454,24 @@ public class GemFireCacheImpl
    * if this cache was forced to close due to a forced-disconnect or system failure, this keeps
    * track of the reason
    */
-  protected volatile Throwable disconnectCause = null;
+  volatile Throwable disconnectCause = null;
 
   /** context where this cache was created -- for debugging, really... */
-  public Exception creationStack = null;
+  private Exception creationStack = null;
 
   /**
    * a system timer task for cleaning up old bridge thread event entries
    */
-  private EventTracker.ExpiryTask recordedEventSweeper;
+  private final EventTracker.ExpiryTask recordedEventSweeper;
 
-  private TombstoneService tombstoneService;
+  private final TombstoneService tombstoneService;
 
   /**
    * DistributedLockService for PartitionedRegions. Remains null until the first PartitionedRegion
    * is created. Destroyed by GemFireCache when closing the cache. Protected by synchronization on
    * this GemFireCache.
    *
-   * @guarded.By prLockServiceLock
+   * GuardedBy prLockServiceLock
    */
   private DistributedLockService prLockService;
 
@@ -503,7 +484,7 @@ public class GemFireCacheImpl
    * DistributedLockService for GatewaySenders. Remains null until the first GatewaySender is
    * created. Destroyed by GemFireCache when closing the cache.
    * 
-   * @guarded.By gatewayLockServiceLock
+   * GuardedBy gatewayLockServiceLock
    */
   private volatile DistributedLockService gatewayLockService;
 
@@ -514,7 +495,7 @@ public class GemFireCacheImpl
 
   private final InternalResourceManager resourceManager;
 
-  private final AtomicReference<BackupManager> backupManager = new AtomicReference<BackupManager>();
+  private final AtomicReference<BackupManager> backupManager = new AtomicReference<>();
 
   private HeapEvictor heapEvictor = null;
 
@@ -524,7 +505,7 @@ public class GemFireCacheImpl
 
   private final Object offHeapEvictorLock = new Object();
 
-  private ResourceEventsListener listener;
+  private ResourceEventsListener resourceEventsListener;
 
   /**
    * Enabled when CacheExistsException issues arise in debugging
@@ -539,7 +520,7 @@ public class GemFireCacheImpl
 
   private final PersistentMemberManager persistentMemberManager;
 
-  private ClientMetadataService clientMetadatService = null;
+  private ClientMetadataService clientMetadataService = null;
 
   private final Object clientMetaDatServiceLock = new Object();
 
@@ -557,20 +538,17 @@ public class GemFireCacheImpl
 
   private final DiskStoreMonitor diskMonitor;
 
-  // Stores the properties used to initialize declarables.
-  private final Map<Declarable, Properties> declarablePropertiesMap =
-      new ConcurrentHashMap<Declarable, Properties>();
+  /**
+   * Stores the properties used to initialize declarables.
+   */
+  private final Map<Declarable, Properties> declarablePropertiesMap = new ConcurrentHashMap<>();
 
   /** {@link PropertyResolver} to resolve ${} type property strings */
-  protected static PropertyResolver resolver;
+  private final PropertyResolver resolver;
 
-  protected static boolean xmlParameterizationEnabled =
+  private static final boolean XML_PARAMETERIZATION_ENABLED =
       !Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "xml.parameterization.disabled");
 
-  public static Runnable internalBeforeApplyChanges;
-
-  public static Runnable internalBeforeNonTXBasicPut;
-
   /**
    * the memcachedServer instance that is started when {@link DistributionConfig#getMemcachedPort()}
    * is specified
@@ -587,23 +565,19 @@ public class GemFireCacheImpl
    * 
    * @since GemFire 8.1
    */
-  private SimpleExtensionPoint<Cache> extensionPoint = new SimpleExtensionPoint<Cache>(this, this);
+  private final SimpleExtensionPoint<Cache> extensionPoint = new SimpleExtensionPoint<>(this, this);
 
   private final CqService cqService;
 
-  private final Set<RegionListener> regionListeners = new ConcurrentHashSet<RegionListener>();
+  private final Set<RegionListener> regionListeners = new ConcurrentHashSet<>();
 
-  private final Map<Class<? extends CacheService>, CacheService> services =
-      new HashMap<Class<? extends CacheService>, CacheService>();
+  private final Map<Class<? extends CacheService>, CacheService> services = new HashMap<>();
 
   public static final int DEFAULT_CLIENT_FUNCTION_TIMEOUT = 0;
 
   private static int clientFunctionTimeout;
 
-  private final static Boolean DISABLE_AUTO_EVICTION =
-      Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "disableAutoEviction");
-
-  private static SecurityService securityService = SecurityService.getSecurityService();
+  private final SecurityService securityService = SecurityService.getSecurityService();
 
   static {
     // this works around jdk bug 6427854, reported in ticket #44434
@@ -629,15 +603,13 @@ public class GemFireCacheImpl
    *        and stack as well as new memory mapped files or shared memory regions.
    * 
    * @return 0 if success, non-zero if error and errno set
-   * 
    */
   private static native int mlockall(int flags);
 
   public static void lockMemory() {
-    int result = 0;
     try {
       Native.register(Platform.C_LIBRARY_NAME);
-      result = mlockall(1);
+      int result = mlockall(1);
       if (result == 0) {
         return;
       }
@@ -645,14 +617,14 @@ public class GemFireCacheImpl
       throw new IllegalStateException("Error trying to lock memory", t);
     }
 
-    int errno = Native.getLastError();
-    String msg = "mlockall failed: " + errno;
-    if (errno == 1 || errno == 12) { // EPERM || ENOMEM
-      msg = "Unable to lock memory due to insufficient free space or privileges.  "
+    int lastError = Native.getLastError();
+    String message = "mlockall failed: " + lastError;
+    if (lastError == 1 || lastError == 12) { // EPERM || ENOMEM
+      message = "Unable to lock memory due to insufficient free space or privileges.  "
           + "Please check the RLIMIT_MEMLOCK soft resource limit (ulimit -l) and "
           + "increase the available memory if needed";
     }
-    throw new IllegalStateException(msg);
+    throw new IllegalStateException(message);
   }
 
   /**
@@ -661,18 +633,18 @@ public class GemFireCacheImpl
    */
   @Override
   public String toString() {
-    final StringBuffer sb = new StringBuffer();
+    final StringBuilder sb = new StringBuilder();
     sb.append("GemFireCache[");
-    sb.append("id = " + System.identityHashCode(this));
-    sb.append("; isClosing = " + this.isClosing);
-    sb.append("; isShutDownAll = " + isCacheAtShutdownAll());
-    sb.append("; created = " + this.creationDate);
-    sb.append("; server = " + this.isServer);
-    sb.append("; copyOnRead = " + this.copyOnRead);
-    sb.append("; lockLease = " + this.lockLease);
-    sb.append("; lockTimeout = " + this.lockTimeout);
+    sb.append("id = ").append(System.identityHashCode(this));
+    sb.append("; isClosing = ").append(this.isClosing);
+    sb.append("; isShutDownAll = ").append(isCacheAtShutdownAll());
+    sb.append("; created = ").append(this.creationDate);
+    sb.append("; server = ").append(this.isServer);
+    sb.append("; copyOnRead = ").append(this.copyOnRead);
+    sb.append("; lockLease = ").append(this.lockLease);
+    sb.append("; lockTimeout = ").append(this.lockTimeout);
     if (this.creationStack != null) {
-      sb.append("\nCreation context:\n");
+      sb.append(System.lineSeparator()).append("Creation context:").append(System.lineSeparator());
       OutputStream os = new OutputStream() {
         @Override
         public void write(int i) {
@@ -694,7 +666,7 @@ public class GemFireCacheImpl
     return instance;
   }
 
-  /*
+  /**
    * Used for testing, retain the old instance in the test and re-set the value when test completes
    */
   public static GemFireCacheImpl setInstanceForTests(GemFireCacheImpl cache) {
@@ -709,7 +681,7 @@ public class GemFireCacheImpl
    * @return the existing cache
    * @throws CacheClosedException if an existing cache can not be found.
    */
-  public static final GemFireCacheImpl getExisting() {
+  public static GemFireCacheImpl getExisting() {
     final GemFireCacheImpl result = instance;
     if (result != null && !result.isClosing) {
       return result;
@@ -748,15 +720,6 @@ public class GemFireCacheImpl
     return result;
   }
 
-  // /**
-  // * @deprecated remove when Lise allows a Hydra VM to
-  // * be re-created
-  // */
-  // public static void clearInstance() {
-  // System.err.println("DEBUG: do not commit GemFireCache#clearInstance");
-  // instance = null;
-  // }
-
   public static GemFireCacheImpl createClient(DistributedSystem system, PoolFactory pf,
       CacheConfig cacheConfig) {
     return basicCreate(system, true, cacheConfig, pf, true, ASYNC_EVENT_LISTENERS, null);
@@ -766,7 +729,7 @@ public class GemFireCacheImpl
     return basicCreate(system, true, cacheConfig, null, false, ASYNC_EVENT_LISTENERS, null);
   }
 
-  public static GemFireCacheImpl createWithAsyncEventListeners(DistributedSystem system,
+  static GemFireCacheImpl createWithAsyncEventListeners(DistributedSystem system,
       CacheConfig cacheConfig, TypeRegistry typeRegistry) {
     return basicCreate(system, true, cacheConfig, null, false, true, typeRegistry);
   }
@@ -776,8 +739,6 @@ public class GemFireCacheImpl
     return basicCreate(system, existingOk, cacheConfig, null, false, ASYNC_EVENT_LISTENERS, null);
   }
 
-
-
   private static GemFireCacheImpl basicCreate(DistributedSystem system, boolean existingOk,
       CacheConfig cacheConfig, PoolFactory pf, boolean isClient, boolean asyncEventListeners,
       TypeRegistry typeRegistry) throws CacheExistsException, TimeoutException,
@@ -793,7 +754,7 @@ public class GemFireCacheImpl
         return instance;
       }
     } catch (CacheXmlException | IllegalArgumentException e) {
-      logger.error(e.getLocalizedMessage());
+      logger.error(e.getLocalizedMessage()); // TODO: log the full stack trace or not?
       throw e;
     } catch (Error | RuntimeException e) {
       logger.error(e);
@@ -821,15 +782,15 @@ public class GemFireCacheImpl
   }
 
   /**
-   * Creates a new instance of GemFireCache and populates it according to the
-   * <code>cache.xml</code>, if appropriate.
+   * Creates a new instance of GemFireCache and populates it according to the {@code cache.xml}, if
+   * appropriate.
    * 
    * @param typeRegistry: currently only unit tests set this parameter to a non-null value
    */
   private GemFireCacheImpl(boolean isClient, PoolFactory pf, DistributedSystem system,
       CacheConfig cacheConfig, boolean asyncEventListeners, TypeRegistry typeRegistry) {
     this.isClient = isClient;
-    this.clientpf = pf;
+    this.poolFactory = pf;
     this.cacheConfig = cacheConfig; // do early for bug 43213
     this.pdxRegistry = typeRegistry;
 
@@ -846,28 +807,25 @@ public class GemFireCacheImpl
         // We only support management on members of a distributed system
         // Should do this: if (!getSystem().isLoner()) {
         // but it causes quickstart.CqClientTest to hang
-        this.listener = new ManagementListener();
-        this.system.addResourceListener(listener);
+        this.resourceEventsListener = new ManagementListener();
+        this.system.addResourceListener(this.resourceEventsListener);
         if (this.system.isLoner()) {
           this.system.getInternalLogWriter()
               .info(LocalizedStrings.GemFireCacheImpl_RUNNING_IN_LOCAL_MODE);
         }
       } else {
-        getLogger().info("Running in client mode");
-        this.listener = null;
+        logger.info("Running in client mode");
+        this.resourceEventsListener = null;
       }
 
       // Don't let admin-only VMs create Cache's just yet.
-      DM dm = this.system.getDistributionManager();
-      if (dm instanceof DistributionManager) {
-        if (((DistributionManager) dm).getDMType() == DistributionManager.ADMIN_ONLY_DM_TYPE) {
-          throw new IllegalStateException(
-              LocalizedStrings.GemFireCache_CANNOT_CREATE_A_CACHE_IN_AN_ADMINONLY_VM
-                  .toLocalizedString());
-        }
+      if (this.dm.getDMType() == DistributionManager.ADMIN_ONLY_DM_TYPE) {
+        throw new IllegalStateException(
+            LocalizedStrings.GemFireCache_CANNOT_CREATE_A_CACHE_IN_AN_ADMINONLY_VM
+                .toLocalizedString());
       }
 
-      this.rootRegions = new HashMap();
+      this.rootRegions = new HashMap<>();
 
       this.cqService = CqServiceProvider.create(this);
 
@@ -875,44 +833,39 @@ public class GemFireCacheImpl
       this.cachePerfStats = new CachePerfStats(system);
       CachePerfStats.enableClockStats = this.system.getConfig().getEnableTimeStatistics();
 
-      this.txMgr = new TXManagerImpl(this.cachePerfStats, this);
-      dm.addMembershipListener(this.txMgr);
+      this.transactionManager = new TXManagerImpl(this.cachePerfStats, this);
+      this.dm.addMembershipListener(this.transactionManager);
 
       this.creationDate = new Date();
 
       this.persistentMemberManager = new PersistentMemberManager();
 
       if (asyncEventListeners) {
-        final ThreadGroup group =
+        final ThreadGroup threadGroup =
             LoggingThreadGroup.createThreadGroup("Message Event Threads", logger);
-        ThreadFactory tf = new ThreadFactory() {
-          @Override
-          public Thread newThread(final Runnable command) {
-            final Runnable r = new Runnable() {
-              @Override
-              public void run() {
-                ConnectionTable.threadWantsSharedResources();
-                command.run();
-              }
-            };
-            Thread thread = new Thread(group, r, "Message Event Thread");
-            thread.setDaemon(true);
-            return thread;
-          }
+        ThreadFactory threadFactory = (Runnable command) -> {
+          final Runnable runnable = () -> {
+            ConnectionTable.threadWantsSharedResources();
+            command.run();
+          };
+          Thread thread = new Thread(threadGroup, runnable, "Message Event Thread");
+          thread.setDaemon(true);
+          return thread;
         };
-        ArrayBlockingQueue q = new ArrayBlockingQueue(EVENT_QUEUE_LIMIT);
-        this.eventThreadPool = new PooledExecutorWithDMStats(q, EVENT_THREAD_LIMIT,
-            this.cachePerfStats.getEventPoolHelper(), tf, 1000);
+        ArrayBlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(EVENT_QUEUE_LIMIT);
+        this.eventThreadPool = new PooledExecutorWithDMStats(queue, EVENT_THREAD_LIMIT,
+            this.cachePerfStats.getEventPoolHelper(), threadFactory, 1000);
       } else {
         this.eventThreadPool = null;
       }
 
       // Initialize the advisor here, but wait to exchange profiles until cache is fully built
       this.resourceAdvisor = ResourceAdvisor.createResourceAdvisor(this);
+
       // Initialize the advisor here, but wait to exchange profiles until cache is fully built
       this.jmxAdvisor = JmxManagerAdvisor.createJmxManagerAdvisor(new JmxManagerAdvisee(this));
 
-      resourceManager = InternalResourceManager.createResourceManager(this);
+      this.resourceManager = InternalResourceManager.createResourceManager(this);
       this.serialNumber = DistributionAdvisor.createSerialNumber();
 
       getInternalResourceManager().addResourceListener(ResourceType.HEAP_MEMORY, getHeapEvictor());
@@ -925,15 +878,15 @@ public class GemFireCacheImpl
             getOffHeapEvictor());
       }
 
-      recordedEventSweeper = EventTracker.startTrackerServices(this);
-      tombstoneService = TombstoneService.initialize(this);
+      this.recordedEventSweeper = EventTracker.startTrackerServices(this);
+      this.tombstoneService = TombstoneService.initialize(this);
 
       TypeRegistry.init();
       basicSetPdxSerializer(this.cacheConfig.getPdxSerializer());
       TypeRegistry.open();
 
       if (!isClient()) {
-        // Initialize the QRM thread freqeuncy to default (1 second )to prevent spill
+        // Initialize the QRM thread frequency to default (1 second) to prevent spill
         // over from previous Cache , as the interval is stored in a static
         // volatile field.
         HARegionQueue.setMessageSyncInterval(HARegionQueue.DEFAULT_MESSAGE_SYNC_INTERVAL);
@@ -948,26 +901,28 @@ public class GemFireCacheImpl
       }
 
       this.txEntryStateFactory = TXEntryState.getFactory();
-      if (xmlParameterizationEnabled) {
-        /** If product properties file is available replace properties from there */
+      if (XML_PARAMETERIZATION_ENABLED) {
+        // If the product properties file is available, replace properties from there
         Properties userProps = this.system.getConfig().getUserDefinedProps();
         if (userProps != null && !userProps.isEmpty()) {
-          resolver = new CacheXmlPropertyResolver(false,
+          this.resolver = new CacheXmlPropertyResolver(false,
               PropertyResolver.NO_SYSTEM_PROPERTIES_OVERRIDE, userProps);
         } else {
-          resolver = new CacheXmlPropertyResolver(false,
+          this.resolver = new CacheXmlPropertyResolver(false,
               PropertyResolver.NO_SYSTEM_PROPERTIES_OVERRIDE, null);
         }
+      } else {
+        this.resolver = null;
       }
 
       SystemFailure.signalCacheCreate();
 
-      diskMonitor = new DiskStoreMonitor();
+      this.diskMonitor = new DiskStoreMonitor();
     } // synchronized
   }
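
    The anonymous ThreadFactory above becomes a lambda that hands out daemon threads in a
    named group. A self-contained sketch of the same pattern with plain JDK executors follows;
    the pool and queue sizes are illustrative stand-ins for EVENT_THREAD_LIMIT and
    EVENT_QUEUE_LIMIT, and the Geode-specific ConnectionTable wrapper is omitted.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        ThreadGroup group = new ThreadGroup("Message Event Threads");
        // Each pooled thread is a daemon in the named group, as in the lambda above.
        ThreadFactory factory = (Runnable command) -> {
          Thread thread = new Thread(group, command, "Message Event Thread");
          thread.setDaemon(true);
          return thread;
        };
        // A bounded queue stands in for EVENT_QUEUE_LIMIT; 4 threads stand in for EVENT_THREAD_LIMIT.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 4, 1000, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<>(16), factory);
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
      }
    }
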
 
   public boolean isRESTServiceRunning() {
-    return isRESTServiceRunning;
+    return this.isRESTServiceRunning;
   }
 
   public void setRESTServiceRunning(boolean isRESTServiceRunning) {
@@ -980,23 +935,25 @@ public class GemFireCacheImpl
    * @return RestAgent
    */
   public RestAgent getRestAgent() {
-    return restAgent;
+    return this.restAgent;
   }
 
-  /*****
+  /**
    * Request the shared configuration from the locator(s) which have the Cluster config service
    * running
    */
-  public ConfigurationResponse requestSharedConfiguration() {
+  private ConfigurationResponse requestSharedConfiguration() {
     final DistributionConfig config = this.system.getConfig();
 
-    if (!(dm instanceof DistributionManager))
+    if (!(this.dm instanceof DistributionManager)) {
       return null;
+    }
 
     // do nothing if this vm is/has locator or this is a client
-    if (((DistributionManager) dm).getDMType() == DistributionManager.LOCATOR_DM_TYPE || isClient
-        || Locator.getLocator() != null)
+    if (this.dm.getDMType() == DistributionManager.LOCATOR_DM_TYPE || this.isClient
+        || Locator.getLocator() != null) {
       return null;
+    }
 
     // can't simply return null if server is not using shared configuration, since we need to find
     // out
@@ -1015,13 +972,11 @@ public class GemFireCacheImpl
       return null;
     }
 
-
-    ConfigurationResponse response = null;
     List<String> locatorConnectionStrings = getSharedConfigLocatorConnectionStringList();
 
     try {
-      response = ClusterConfigurationLoader.requestConfigurationFromLocators(system.getConfig(),
-          locatorConnectionStrings);
+      ConfigurationResponse response = ClusterConfigurationLoader
+          .requestConfigurationFromLocators(this.system.getConfig(), locatorConnectionStrings);
 
       // log the configuration received from the locator
       logger.info(LocalizedMessage
@@ -1031,7 +986,7 @@ public class GemFireCacheImpl
       Configuration clusterConfig =
           response.getRequestedConfiguration().get(ClusterConfigurationService.CLUSTER_CONFIG);
       Properties clusterSecProperties =
-          (clusterConfig == null) ? new Properties() : clusterConfig.getGemfireProperties();
+          clusterConfig == null ? new Properties() : clusterConfig.getGemfireProperties();
 
       // If not using shared configuration, return null or throw an exception is locator is secured
       if (!config.getUseSharedConfiguration()) {
@@ -1064,15 +1019,10 @@ public class GemFireCacheImpl
     }
   }
 
-  public void deployJarsRecevedFromClusterConfiguration(ConfigurationResponse response) {
+  private void deployJarsReceivedFromClusterConfiguration(ConfigurationResponse response) {
     try {
       ClusterConfigurationLoader.deployJarsReceivedFromClusterConfiguration(this, response);
-    } catch (IOException e) {
-      throw new GemFireConfigException(
-          LocalizedStrings.GemFireCache_EXCEPTION_OCCURED_WHILE_DEPLOYING_JARS_FROM_SHARED_CONDFIGURATION
-              .toLocalizedString(),
-          e);
-    } catch (ClassNotFoundException e) {
+    } catch (IOException | ClassNotFoundException e) {
       throw new GemFireConfigException(
           LocalizedStrings.GemFireCache_EXCEPTION_OCCURED_WHILE_DEPLOYING_JARS_FROM_SHARED_CONDFIGURATION
               .toLocalizedString(),
@@ -1080,10 +1030,10 @@ public class GemFireCacheImpl
     }
   }
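
    The two identical catch blocks above collapse into a single Java 7 multi-catch. A
    standalone sketch of that shape, with a made-up ConfigException standing in for the real
    GemFireConfigException and a made-up class name:

    import java.io.IOException;

    public class MultiCatchSketch {
      static class ConfigException extends RuntimeException {
        ConfigException(String message, Throwable cause) {
          super(message, cause);
        }
      }

      static void deployJars(boolean missingClass) {
        try {
          if (missingClass) {
            throw new ClassNotFoundException("example.MissingFunction");
          }
          throw new IOException("could not read jar bytes");
        } catch (IOException | ClassNotFoundException e) {
          // One handler for both checked exceptions; the original cause is preserved.
          throw new ConfigException("Exception occurred while deploying jars", e);
        }
      }

      public static void main(String[] args) {
        try {
          deployJars(true);
        } catch (ConfigException e) {
          System.out.println(e.getMessage() + " <- " + e.getCause());
        }
      }
    }
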
 
-
-  // When called, clusterProps and serverProps and key could not be null
-  public static boolean isMisConfigured(Properties clusterProps, Properties serverProps,
-      String key) {
+  /**
+   * When called, clusterProps, serverProps, and key must not be null.
+   */
+  static boolean isMisConfigured(Properties clusterProps, Properties serverProps, String key) {
     String clusterPropValue = clusterProps.getProperty(key);
     String serverPropValue = serverProps.getProperty(key);
 
@@ -1095,36 +1045,31 @@ public class GemFireCacheImpl
     if (StringUtils.isBlank(clusterPropValue))
       return true;
 
-    // at this point check for eqality
+    // at this point check for equality
     return !clusterPropValue.equals(serverPropValue);
   }
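
    A minimal sketch of the comparison this helper performs for one key, using plain
    java.util.Properties. The blank-value handling and the property names below are
    illustrative assumptions; part of the real method falls outside this hunk.

    import java.util.Properties;

    public class PropertyComparisonSketch {
      // Illustrative restatement: a key is "misconfigured" when the server defines a
      // value that the cluster either lacks or defines differently.
      static boolean isMisConfigured(Properties clusterProps, Properties serverProps, String key) {
        String clusterValue = clusterProps.getProperty(key);
        String serverValue = serverProps.getProperty(key);
        if (serverValue == null || serverValue.trim().isEmpty()) {
          return false; // server does not insist on a value
        }
        if (clusterValue == null || clusterValue.trim().isEmpty()) {
          return true; // server has a value the cluster never agreed on
        }
        return !clusterValue.equals(serverValue);
      }

      public static void main(String[] args) {
        Properties cluster = new Properties();
        cluster.setProperty("security-manager", "org.example.MySecurityManager");
        Properties server = new Properties();
        server.setProperty("security-manager", "org.example.OtherSecurityManager");
        System.out.println(isMisConfigured(cluster, server, "security-manager")); // true
      }
    }
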
 
-  public List<String> getSharedConfigLocatorConnectionStringList() {
-    List<String> locatorConnectionStringList = new ArrayList<String>();
+  private List<String> getSharedConfigLocatorConnectionStringList() {
+    List<String> locatorConnectionStringList = new ArrayList<>();
 
-    Map<InternalDistributedMember, Collection<String>> scl =
+    Map<InternalDistributedMember, Collection<String>> locatorsWithClusterConfig =
         this.getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
 
     // If there are no locators with Shared configuration, that means the system has been started
     // without shared configuration
     // then do not make requests to the locators
-    if (!scl.isEmpty()) {
-      Set<Entry<InternalDistributedMember, Collection<String>>> locs = scl.entrySet();
+    if (!locatorsWithClusterConfig.isEmpty()) {
+      Set<Entry<InternalDistributedMember, Collection<String>>> locators =
+          locatorsWithClusterConfig.entrySet();
 
-      for (Entry<InternalDistributedMember, Collection<String>> loc : locs) {
+      for (Entry<InternalDistributedMember, Collection<String>> loc : locators) {
         Collection<String> locStrings = loc.getValue();
-        Iterator<String> locStringIter = locStrings.iterator();
-
-        while (locStringIter.hasNext()) {
-          locatorConnectionStringList.add(locStringIter.next());
-        }
+        locatorConnectionStringList.addAll(locStrings);
       }
     }
     return locatorConnectionStringList;
   }
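
    The removed iterator loop is replaced by one addAll per locator. A standalone sketch of
    flattening the per-locator connection strings, with made-up member names and host[port]
    values in place of InternalDistributedMember keys:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LocatorStringsSketch {
      public static void main(String[] args) {
        Map<String, Collection<String>> locatorsWithClusterConfig = new HashMap<>();
        locatorsWithClusterConfig.put("locator-1", Arrays.asList("host1[10334]"));
        locatorsWithClusterConfig.put("locator-2", Arrays.asList("host2[10334]", "host2[10335]"));

        // Loop form, mirroring the refactored method: one addAll per member entry.
        List<String> connectionStrings = new ArrayList<>();
        for (Map.Entry<String, Collection<String>> entry : locatorsWithClusterConfig.entrySet()) {
          connectionStrings.addAll(entry.getValue());
        }
        System.out.println(connectionStrings);
      }
    }
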
 
-
-
   /**
    * Used by unit tests to force cache creation to use a test generated cache.xml
    */
@@ -1149,7 +1094,7 @@ public class GemFireCacheImpl
     return this.isClient || !getAllPools().isEmpty();
   }
 
-  private Collection<Pool> getAllPools() {
+  private static Collection<Pool> getAllPools() {
     Collection<Pool> pools = PoolManagerImpl.getPMI().getMap().values();
     for (Iterator<Pool> itr = pools.iterator(); itr.hasNext();) {
       PoolImpl pool = (PoolImpl) itr.next();
@@ -1168,8 +1113,8 @@ public class GemFireCacheImpl
     return this.defaultPool;
   }
 
-  private void setDefaultPool(Pool v) {
-    this.defaultPool = v;
+  private void setDefaultPool(Pool value) {
+    this.defaultPool = value;
   }
 
   /**
@@ -1184,9 +1129,7 @@ public class GemFireCacheImpl
     GemFireCacheImpl.instance = this;
     GemFireCacheImpl.pdxInstance = this;
 
-    for (Iterator<CacheLifecycleListener> iter = cacheLifecycleListeners.iterator(); iter
-        .hasNext();) {
-      CacheLifecycleListener listener = (CacheLifecycleListener) iter.next();
+    for (CacheLifecycleListener listener : cacheLifecycleListeners) {
       listener.cacheCreated(this);
     }
 
@@ -1194,24 +1137,23 @@ public class GemFireCacheImpl
 
     // request and check cluster configuration
     ConfigurationResponse configurationResponse = requestSharedConfiguration();
-    deployJarsRecevedFromClusterConfiguration(configurationResponse);
+    deployJarsReceivedFromClusterConfiguration(configurationResponse);
 
     // apply the cluster's properties configuration and initialize security using that configuration
     ClusterConfigurationLoader.applyClusterPropertiesConfiguration(this, configurationResponse,
-        system.getConfig());
+        this.system.getConfig());
 
     // first initialize the security service using the security properties
-    securityService.initSecurity(system.getConfig().getSecurityProps());
+    this.securityService.initSecurity(this.system.getConfig().getSecurityProps());
     // secondly if cacheConfig has a securityManager, use that instead
-    if (cacheConfig.getSecurityManager() != null) {
-      securityService.setSecurityManager(cacheConfig.getSecurityManager());
+    if (this.cacheConfig.getSecurityManager() != null) {
+      this.securityService.setSecurityManager(this.cacheConfig.getSecurityManager());
     }
     // if cacheConfig has a postProcessor, use that instead
-    if (cacheConfig.getPostProcessor() != null) {
-      securityService.setPostProcessor(cacheConfig.getPostProcessor());
+    if (this.cacheConfig.getPostProcessor() != null) {
+      this.securityService.setPostProcessor(this.cacheConfig.getPostProcessor());
     }
 
-
     SystemMemberCacheEventProcessor.send(this, Operation.CACHE_CREATE);
     this.resourceAdvisor.initializationGate();
 
@@ -1225,23 +1167,21 @@ public class GemFireCacheImpl
     // we will not be ready for all the events that cache.xml
     // processing can deliver (region creation, etc.).
     // This call may need to be moved inside initializeDeclarativeCache.
-    /** Entry to GemFire Management service **/
-    this.jmxAdvisor.initializationGate();
+    this.jmxAdvisor.initializationGate(); // Entry to GemFire Management service
 
     // this starts up the ManagementService, register and federate the internal beans
-    system.handleResourceEvent(ResourceEvent.CACHE_CREATE, this);
-
-    boolean completedCacheXml = false;
+    this.system.handleResourceEvent(ResourceEvent.CACHE_CREATE, this);
 
     initializeServices();
 
+    boolean completedCacheXml = false;
     try {
       if (configurationResponse == null) {
         // Deploy all the jars from the deploy working dir.
         ClassPathLoader.getLatest().getJarDeployer().loadPreviouslyDeployedJarsFromDisk();
       }
       ClusterConfigurationLoader.applyClusterXmlConfiguration(this, configurationResponse,
-          system.getConfig());
+          this.system.getConfig());
       initializeDeclarativeCache();
       completedCacheXml = true;
     } finally {
@@ -1256,7 +1196,7 @@ public class GemFireCacheImpl
       }
     }
 
-    this.clientpf = null;
+    this.poolFactory = null;
 
     startColocatedJmxManagerLocator();
 
@@ -1270,7 +1210,7 @@ public class GemFireCacheImpl
         DEFAULT_CLIENT_FUNCTION_TIMEOUT);
     clientFunctionTimeout = time >= 0 ? time : DEFAULT_CLIENT_FUNCTION_TIMEOUT;
 
-    isInitialized = true;
+    this.isInitialized = true;
   }
 
   /**
@@ -1282,35 +1222,35 @@ public class GemFireCacheImpl
     for (CacheService service : loader) {
       service.init(this);
       this.services.put(service.getInterface(), service);
-      system.handleResourceEvent(ResourceEvent.CACHE_SERVICE_CREATE, service);
+      this.system.handleResourceEvent(ResourceEvent.CACHE_SERVICE_CREATE, service);
     }
   }
 
   private boolean isNotJmxManager() {
-    return (this.system.getConfig().getJmxManagerStart() != true);
+    return !this.system.getConfig().getJmxManagerStart();
   }
 
   private boolean isServerNode() {
-    return (this.system.getDistributedMember().getVmKind() != DistributionManager.LOCATOR_DM_TYPE
+    return this.system.getDistributedMember().getVmKind() != DistributionManager.LOCATOR_DM_TYPE
         && this.system.getDistributedMember().getVmKind() != DistributionManager.ADMIN_ONLY_DM_TYPE
-        && !isClient());
+        && !isClient();
   }
 
   private void startRestAgentServer(GemFireCacheImpl cache) {
     if (this.system.getConfig().getStartDevRestApi() && isNotJmxManager() && isServerNode()) {
       this.restAgent = new RestAgent(this.system.getConfig());
-      restAgent.start(cache);
+      this.restAgent.start(cache);
     } else {
       this.restAgent = null;
     }
   }
 
   private void startMemcachedServer() {
-    int port = system.getConfig().getMemcachedPort();
+    int port = this.system.getConfig().getMemcachedPort();
     if (port != 0) {
-      String protocol = system.getConfig().getMemcachedProtocol();
+      String protocol = this.system.getConfig().getMemcachedProtocol();
       assert protocol != null;
-      String bindAddress = system.getConfig().getMemcachedBindAddress();
+      String bindAddress = this.system.getConfig().getMemcachedBindAddress();
       assert bindAddress != null;
       if (bindAddress.equals(DistributionConfig.DEFAULT_MEMCACHED_BIND_ADDRESS)) {
         logger.info(LocalizedMessage.create(
@@ -1328,9 +1268,9 @@ public class GemFireCacheImpl
   }
 
   private void startRedisServer() {
-    int port = system.getConfig().getRedisPort();
+    int port = this.system.getConfig().getRedisPort();
     if (port != 0) {
-      String bindAddress = system.getConfig().getRedisBindAddress();
+      String bindAddress = this.system.getConfig().getRedisBindAddress();
       assert bindAddress != null;
       if (bindAddress.equals(DistributionConfig.DEFAULT_REDIS_BIND_ADDRESS)) {
         getLoggerI18n().info(
@@ -1346,7 +1286,6 @@ public class GemFireCacheImpl
     }
   }
 
-
   @Override
   public URL getCacheXmlURL() {
     if (this.getMyId().getVmKind() == DistributionManager.LOCATOR_DM_TYPE) {
@@ -1356,11 +1295,11 @@ public class GemFireCacheImpl
     if (xmlFile == null) {
       xmlFile = this.system.getConfig().getCacheXmlFile();
     }
-    if ("".equals(xmlFile.getName())) {
+    if (xmlFile.getName().isEmpty()) {
       return null;
     }
 
-    URL url = null;
+    URL url;
     if (!xmlFile.exists() || !xmlFile.isFile()) {
       // do a resource search
       String resource = xmlFile.getPath();
@@ -1372,7 +1311,7 @@ public class GemFireCacheImpl
     } else {
       try {
         url = xmlFile.toURL();
-      } catch (IOException ex) {
+      } catch (MalformedURLException ex) {
         throw new CacheXmlException(
             LocalizedStrings.GemFireCache_COULD_NOT_CONVERT_XML_FILE_0_TO_AN_URL
                 .toLocalizedString(xmlFile),
@@ -1386,7 +1325,7 @@ public class GemFireCacheImpl
           throw new CacheXmlException(
               LocalizedStrings.GemFireCache_DECLARATIVE_CACHE_XML_FILERESOURCE_0_DOES_NOT_EXIST
                   .toLocalizedString(xmlFile));
-        } else /* if (!xmlFile.isFile()) */ {
+        } else {
           throw new CacheXmlException(
               LocalizedStrings.GemFireCache_DECLARATIVE_XML_FILE_0_IS_NOT_A_FILE
                   .toLocalizedString(xmlFile));
@@ -1398,21 +1337,20 @@ public class GemFireCacheImpl
   }
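
    File.toURL() only declares MalformedURLException, so the narrowed catch above is accurate.
    A small sketch of the same conversion using the non-deprecated toURI().toURL() form; the
    file name is illustrative.

    import java.io.File;
    import java.net.MalformedURLException;
    import java.net.URL;

    public class CacheXmlUrlSketch {
      public static void main(String[] args) {
        File xmlFile = new File("cache.xml"); // illustrative path
        try {
          // File.toURL() is deprecated; toURI().toURL() escapes the path correctly.
          URL url = xmlFile.toURI().toURL();
          System.out.println(url);
        } catch (MalformedURLException ex) {
          throw new IllegalStateException("Could not convert " + xmlFile + " to a URL", ex);
        }
      }
    }
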
 
   /**
-   * Initializes the contents of this <code>Cache</code> according to the declarative caching XML
-   * file specified by the given <code>DistributedSystem</code>. Note that this operation cannot be
-   * performed in the constructor because creating regions in the cache, etc. uses the cache itself
-   * (which isn't initialized until the constructor returns).
+   * Initializes the contents of this {@code Cache} according to the declarative caching XML file
+   * specified by the given {@code DistributedSystem}. Note that this operation cannot be performed
+   * in the constructor because creating regions in the cache, etc. uses the cache itself (which
+   * isn't initialized until the constructor returns).
    *
    * @throws CacheXmlException If something goes wrong while parsing the declarative caching XML
    *         file.
    * @throws TimeoutException If a {@link org.apache.geode.cache.Region#put(Object, Object)}times
    *         out while initializing the cache.
-   * @throws CacheWriterException If a <code>CacheWriterException</code> is thrown while
-   *         initializing the cache.
-   * @throws RegionExistsException If the declarative caching XML file desribes a region that
-   *         already exists (including the root region).
-   * @throws GatewayException If a <code>GatewayException</code> is thrown while initializing the
+   * @throws CacheWriterException If a {@code CacheWriterException} is thrown while initializing the
    *         cache.
+   * @throws RegionExistsException If the declarative caching XML file describes a region that
+   *         already exists (including the root region).
+   * @throws GatewayException If a {@code GatewayException} is thrown while initializing the cache.
    * 
    * @see #loadCacheXml
    */
@@ -1432,9 +1370,9 @@ public class GemFireCacheImpl
       return; // nothing needs to be done
     }
 
+    InputStream stream = null;
     try {
       logCacheXML(url, cacheXmlDescription);
-      InputStream stream = null;
       if (cacheXmlDescription != null) {
         if (logger.isTraceEnabled()) {
           logger.trace("initializing cache with generated XML: {}", cacheXmlDescription);
@@ -1444,40 +1382,57 @@ public class GemFireCacheImpl
         stream = url.openStream();
       }
       loadCacheXml(stream);
-      try {
-        stream.close();
-      } catch (IOException ignore) {
-      }
+
     } catch (IOException ex) {
       throw new CacheXmlException(
           LocalizedStrings.GemFireCache_WHILE_OPENING_CACHE_XML_0_THE_FOLLOWING_ERROR_OCCURRED_1
-              .toLocalizedString(new Object[] {url.toString(), ex}));
+              .toLocalizedString(url.toString(), ex));
 
     } catch (CacheXmlException ex) {
       CacheXmlException newEx =
           new CacheXmlException(LocalizedStrings.GemFireCache_WHILE_READING_CACHE_XML_0_1
-              .toLocalizedString(new Object[] {url, ex.getMessage()}));
+              .toLocalizedString(url, ex.getMessage()));
+      /*
+       * TODO: why use setStackTrace and initCause? removal breaks several tests: OplogRVVJUnitTest,
+       * NewDeclarativeIndexCreationJUnitTest CacheXml70DUnitTest, CacheXml80DUnitTest,
+       * CacheXml81DUnitTest, CacheXmlGeode10DUnitTest RegionManagementDUnitTest
+       */
       newEx.setStackTrace(ex.getStackTrace());
       newEx.initCause(ex.getCause());
       throw newEx;
+
+    } finally {
+      if (stream != null) {
+        try {
+          stream.close();
+        } catch (IOException ignore) {
+        }
+      }
     }
   }
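
    The stream is now closed in an explicit finally block. On Java 7 and later the same
    cleanup can be expressed with try-with-resources; a sketch under that assumption, reading
    non-empty lines from an illustrative URL much as the XML logging below does:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class TryWithResourcesSketch {
      public static void main(String[] args) throws IOException {
        URL url = new URL("https://geode.apache.org/"); // illustrative source
        StringBuilder sb = new StringBuilder();
        // The reader (and the underlying stream) is closed automatically, even if
        // readLine throws, replacing the explicit finally block.
        try (BufferedReader br = new BufferedReader(new InputStreamReader(url.openStream()))) {
          String line;
          while ((line = br.readLine()) != null) {
            if (!line.isEmpty()) {
              sb.append(System.lineSeparator()).append(line);
            }
          }
        }
        System.out.println(sb.length() + " characters read");
      }
    }
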
 
-  private void logCacheXML(URL url, String cacheXmlDescription) {
+  private static void logCacheXML(URL url, String cacheXmlDescription) {
     if (cacheXmlDescription == null) {
       StringBuilder sb = new StringBuilder();
+      BufferedReader br = null;
       try {
-        final String EOLN = System.getProperty("line.separator");
-        BufferedReader br = new BufferedReader(new InputStreamReader(url.openStream()));
-        String l = br.readLine();
-        while (l != null) {
-          if (!l.isEmpty()) {
-            sb.append(EOLN).append(l);
+        final String lineSeparator = System.getProperty("line.separator");
+        br = new BufferedReader(new InputStreamReader(url.openStream()));
+        String line = br.readLine();
+        while (line != null) {
+          if (!line.isEmpty()) {
+            sb.append(lineSeparator).append(line);
           }
-          l = br.readLine();
+          line = br.readLine();
         }
-        br.close();
       } catch (IOException ignore) {
+      } finally {
+        if (br != null) {
+          try {
+            br.close();
+          } catch (IOException ignore) {
+          }
+        }
       }
       logger.info(
           LocalizedMessage.create(LocalizedStrings.GemFireCache_INITIALIZING_CACHE_USING__0__1,
@@ -1516,7 +1471,7 @@ public class GemFireCacheImpl
   }
 
   /**
-   * create diskstore factory with default attributes
+   * create diskStore factory with default attributes
    *
    * @since GemFire prPersistSprint2
    */
@@ -1526,7 +1481,7 @@ public class GemFireCacheImpl
   }
 
   /**
-   * create diskstore factory with predefined attributes
+   * create diskStore factory with predefined attributes
    *
    * @since GemFire prPersistSprint2
    */
@@ -1534,22 +1489,16 @@ public class GemFireCacheImpl
     return new DiskStoreFactoryImpl(this, attrs);
   }
 
-  protected class Stopper extends CancelCriterion {
+  class Stopper extends CancelCriterion {
 
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.geode.CancelCriterion#cancelInProgress()
-     */
     @Override
     public String cancelInProgress() {
-      String reason =
-          GemFireCacheImpl.this.getDistributedSystem().getCancelCriterion().cancelInProgress();
+      String reason = getDistributedSystem().getCancelCriterion().cancelInProgress();
       if (reason != null) {
         return reason;
       }
       if (GemFireCacheImpl.this.disconnectCause != null) {
-        return disconnectCause.getMessage();
+        return GemFireCacheImpl.this.disconnectCause.getMessage();
       }
       if (GemFireCacheImpl.this.isClosing) {
         return "The cache is closed."; // this + ": closed";
@@ -1557,42 +1506,37 @@ public class GemFireCacheImpl
       return null;
     }
 
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.geode.CancelCriterion#generateCancelledException(java.lang.Throwable)
-     */
     @Override
-    public RuntimeException generateCancelledException(Throwable e) {
+    public RuntimeException generateCancelledException(Throwable throwable) {
       String reason = cancelInProgress();
       if (reason == null) {
         return null;
       }
       RuntimeException result =
-          getDistributedSystem().getCancelCriterion().generateCancelledException(e);
+          getDistributedSystem().getCancelCriterion().generateCancelledException(throwable);
       if (result != null) {
         return result;
       }
       if (GemFireCacheImpl.this.disconnectCause == null) {
         // No root cause, specify the one given and be done with it.
-        return new CacheClosedException(reason, e);
+        return new CacheClosedException(reason, throwable);
       }
 
-      if (e == null) {
+      if (throwable == null) {
         // Caller did not specify any root cause, so just use our own.
         return new CacheClosedException(reason, GemFireCacheImpl.this.disconnectCause);
       }
 
       // Attempt to stick rootCause at tail end of the exception chain.
-      Throwable nt = e;
+      Throwable nt = throwable;
       while (nt.getCause() != null) {
         nt = nt.getCause();
       }
       try {
         nt.initCause(GemFireCacheImpl.this.disconnectCause);
-        return new CacheClosedException(reason, e);
+        return new CacheClosedException(reason, throwable);
       } catch (IllegalStateException e2) {
-        // Bug 39496 (Jrockit related) Give up. The following
+        // Bug 39496 (JRockit related) Give up. The following
         // error is not entirely sane but gives the correct general picture.
         return new CacheClosedException(reason, GemFireCacheImpl.this.disconnectCause);
       }
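
    A minimal sketch of the "stick the root cause at the tail of the chain" step above, with
    made-up exceptions; initCause throws IllegalStateException when the tail already has a
    cause, which is exactly the fallback branch taken above.

    public class CauseChainSketch {
      public static void main(String[] args) {
        Exception disconnectCause = new IllegalStateException("membership lost");
        Exception thrown = new RuntimeException("outer", new RuntimeException("inner"));

        // Walk to the last throwable in the chain.
        Throwable tail = thrown;
        while (tail.getCause() != null) {
          tail = tail.getCause();
        }
        try {
          tail.initCause(disconnectCause); // fails if the tail already has a cause
          System.out.println("chained: " + thrown.getCause().getCause());
        } catch (IllegalStateException alreadyHasCause) {
          System.out.println("fallback: report " + disconnectCause + " directly");
        }
      }
    }
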
@@ -1603,7 +1547,7 @@ public class GemFireCacheImpl
 
   @Override
   public CancelCriterion getCancelCriterion() {
-    return stopper;
+    return this.stopper;
   }
 
   /** return true if the cache was closed due to being shunned by other members */
@@ -1676,8 +1620,8 @@ public class GemFireCacheImpl
   public static void emergencyClose() {
     final boolean DEBUG = SystemFailure.TRACE_CLOSE;
 
-    GemFireCacheImpl inst = GemFireCacheImpl.instance;
-    if (inst == null) {
+    GemFireCacheImpl cache = GemFireCacheImpl.instance;
+    if (cache == null) {
       if (DEBUG) {
         System.err.println("GemFireCache#emergencyClose: no instance");
       }
@@ -1687,10 +1631,9 @@ public class GemFireCacheImpl
     GemFireCacheImpl.instance = null;
     GemFireCacheImpl.pdxInstance = null;
     // leave the PdxSerializer set if we have one to prevent 43412
-    // TypeRegistry.setPdxSerializer(null);
 
     // Shut down messaging first
-    InternalDistributedSystem ids = inst.system;
+    InternalDistributedSystem ids = cache.system;
     if (ids != null) {
       if (DEBUG) {
         System.err.println("DEBUG: emergencyClose InternalDistributedSystem");
@@ -1698,20 +1641,18 @@ public class GemFireCacheImpl
       ids.emergencyClose();
     }
 
-    inst.disconnectCause = SystemFailure.getFailure();
-    inst.isClosing = true;
+    cache.disconnectCause = SystemFailure.getFailure();
+    cache.isClosing = true;
 
     // Clear cache servers
     if (DEBUG) {
       System.err.println("DEBUG: Close cache servers");
     }
     {
-      Iterator allCacheServersItr = inst.allCacheServers.iterator();
-      while (allCacheServersItr.hasNext()) {
-        CacheServerImpl bs = (CacheServerImpl) allCacheServersItr.next();
-        AcceptorImpl ai = bs.getAcceptor();
-        if (ai != null) {
-          ai.emergencyClose();
+      for (CacheServerImpl cacheServer : cache.allCacheServers) {
+        AcceptorImpl acceptor = cacheServer.getAcceptor();
+        if (acceptor != null) {
+          acceptor.emergencyClose();
         }
       }
     }
@@ -1725,16 +1666,13 @@ public class GemFireCacheImpl
       System.err.println("DEBUG: closing gateway hubs");
     }
 
-    // These are synchronized sets -- avoid potential deadlocks
-    // instance.pathToRegion.clear(); // garbage collection
-    // instance.gatewayHubs.clear();
-
     // rootRegions is intentionally *not* synchronized. The
     // implementation of clear() does not currently allocate objects.
-    inst.rootRegions.clear();
+    cache.rootRegions.clear();
+
     // partitionedRegions is intentionally *not* synchronized, The
     // implementation of clear() does not currently allocate objects.
-    inst.partitionedRegions.clear();
+    cache.partitionedRegions.clear();
     if (DEBUG) {
       System.err.println("DEBUG: done with cache emergency close");
     }
@@ -1742,7 +1680,7 @@ public class GemFireCacheImpl
 
   @Override
   public boolean isCacheAtShutdownAll() {
-    return isShutDownAll.get();
+    return this.isShutDownAll.get();
   }
 
   /**
@@ -1752,7 +1690,7 @@ public class GemFireCacheImpl
   private static final int shutdownAllPoolSize =
       Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "SHUTDOWN_ALL_POOL_SIZE", -1);
 
-  void shutdownSubTreeGracefully(Map<String, PartitionedRegion> prSubMap) {
+  private void shutdownSubTreeGracefully(Map<String, PartitionedRegion> prSubMap) {
     for (final PartitionedRegion pr : prSubMap.values()) {
       shutDownOnePRGracefully(pr);
     }
@@ -1782,27 +1720,23 @@ public class GemFireCacheImpl
         boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");
 
         if (testIGE) {
-          InternalGemFireError assErr = new InternalGemFireError(
+          throw new InternalGemFireError(
               LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
-          throw assErr;
         }
 
-        // bug 44031 requires multithread shutdownall should be grouped
+        // bug 44031 requires that multithreaded shutDownAll be grouped
         // by root region. However, shutDownAllDuringRecovery.conf test revealed that
         // we have to close colocated child regions first.
         // Now check all the PR, if anyone has colocate-with attribute, sort all the
         // PRs by colocation relationship and close them sequentially, otherwise still
         // group them by root region.
-        TreeMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
+        SortedMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
         if (prTrees.size() > 1 && shutdownAllPoolSize != 1) {
           ExecutorService es = getShutdownAllExecutorService(prTrees.size());
           for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
-            es.execute(new Runnable() {
-              @Override
-              public void run() {
-                ConnectionTable.threadWantsSharedResources();
-                shutdownSubTreeGracefully(prSubMap);
-              }
+            es.execute(() -> {
+              ConnectionTable.threadWantsSharedResources();
+              shutdownSubTreeGracefully(prSubMap);
             });
           } // for each root
           es.shutdown();
@@ -1827,76 +1761,83 @@ public class GemFireCacheImpl
   }
 
   private ExecutorService getShutdownAllExecutorService(int size) {
-    final ThreadGroup thrGrp = LoggingThreadGroup.createThreadGroup("ShutdownAllGroup", logger);
-    ThreadFactory thrFactory = new ThreadFactory() {
+    final ThreadGroup threadGroup =
+        LoggingThreadGroup.createThreadGroup("ShutdownAllGroup", logger);
+    ThreadFactory threadFactory = new ThreadFactory() {
       private final AtomicInteger threadCount = new AtomicInteger(1);
 
       @Override
-      public Thread newThread(Runnable r) {
-        Thread t = new Thread(thrGrp, r, "ShutdownAll-" + threadCount.getAndIncrement());
-        t.setDaemon(true);
-        return t;
+      public Thread newThread(Runnable runnable) {
+        Thread thread =
+            new Thread(threadGroup, runnable, "ShutdownAll-" + this.threadCount.getAndIncrement());
+        thread.setDaemon(true);
+        return thread;
       }
     };
-    ExecutorService es = Executors
-        .newFixedThreadPool(shutdownAllPoolSize == -1 ? size : shutdownAllPoolSize, thrFactory);
-    return es;
+    return Executors.newFixedThreadPool(shutdownAllPoolSize == -1 ? size : shutdownAllPoolSize,
+        threadFactory);
   }
 
-  private void shutDownOnePRGracefully(PartitionedRegion pr) {
+  private void shutDownOnePRGracefully(PartitionedRegion partitionedRegion) {
     boolean acquiredLock = false;
     try {
-      pr.acquireDestroyLock();
+      partitionedRegion.acquireDestroyLock();
       acquiredLock = true;
 
-      synchronized (pr.getRedundancyProvider()) {
-        if (pr.isDataStore() && pr.getDataStore() != null
-            && pr.getDataPolicy() == DataPolicy.PERSISTENT_PARTITION) {
-          int numBuckets = pr.getTotalNumberOfBuckets();
-          Map<InternalDistributedMember, PersistentMemberID> bucketMaps[] = new Map[numBuckets];
-          PartitionedRegionDataStore prds = pr.getDataStore();
+      synchronized (partitionedRegion.getRedundancyProvider()) {
+        if (partitionedRegion.isDataStore() && partitionedRegion.getDataStore() != null
+            && partitionedRegion.getDataPolicy() == DataPolicy.PERSISTENT_PARTITION) {
+          int numBuckets = partitionedRegion.getTotalNumberOfBuckets();
+          Map<InternalDistributedMember, PersistentMemberID>[] bucketMaps = new Map[numBuckets];
+          PartitionedRegionDataStore dataStore = partitionedRegion.getDataStore();
 
           // lock all the primary buckets
-          Set<Entry<Integer, BucketRegion>> bucketEntries = prds.getAllLocalBuckets();
+          Set<Entry<Integer, BucketRegion>> bucketEntries = dataStore.getAllLocalBuckets();
           for (Map.Entry e : bucketEntries) {
-            BucketRegion br = (BucketRegion) e.getValue();
-            if (br == null || br.isDestroyed) {
+            BucketRegion bucket = (BucketRegion) e.getValue();
+            if (bucket == null || bucket.isDestroyed) {
               // bucket region could be destroyed in race condition
               continue;
             }
-            br.getBucketAdvisor().tryLockIfPrimary();
+            bucket.getBucketAdvisor().tryLockIfPrimary();
 
-            // get map <InternalDistriutedMemeber, persistentID> for this bucket's
+            // get map <InternalDistributedMember, persistentID> for this bucket's
             // remote members
-            bucketMaps[br.getId()] = br.getBucketAdvisor().adviseInitializedPersistentMembers();
+            bucketMaps[bucket.getId()] =
+                bucket.getBucketAdvisor().adviseInitializedPersistentMembers();
             if (logger.isDebugEnabled()) {
               logger.debug("shutDownAll: PR {}: initialized persistent members for {}:{}",
-                  pr.getName(), br.getId(), bucketMaps[br.getId()]);
+                  partitionedRegion.getName(), bucket.getId(), bucketMaps[bucket.getId()]);
             }
           }
           if (logger.isDebugEnabled()) {
-            logger.debug("shutDownAll: All buckets for PR {} are locked.", pr.getName());
+            logger.debug("shutDownAll: All buckets for PR {} are locked.",
+                partitionedRegion.getName());
           }
 
           // send lock profile update to other members
-          pr.setShutDownAllStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
-          new UpdateAttributesProcessor(pr).distribute(false);
-          pr.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
+          partitionedRegion.setShutDownAllStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
+          new UpdateAttributesProcessor(partitionedRegion).distribute(false);
+          partitionedRegion.getRegionAdvisor()
+              .waitForProfileStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
           if (logger.isDebugEnabled()) {
-            logger.debug("shutDownAll: PR {}: all bucketlock profiles received.", pr.getName());
+            logger.debug("shutDownAll: PR {}: all bucketLock profiles received.",
+                partitionedRegion.getName());
           }
 
           // if async write, do flush
-          if (!pr.getAttributes().isDiskSynchronous()) {
-            // several PRs might share the same diskstore, we will only flush once
+          if (!partitionedRegion.getAttributes().isDiskSynchronous()) {
+            // several PRs might share the same diskStore; we will only flush once
             // even flush is called several times.
-            pr.getDiskStore().forceFlush();
+            partitionedRegion.getDiskStore().forceFlush();
             // send flush profile update to other members
-            pr.setShutDownAllStatus(PartitionedRegion.DISK_STORE_FLUSHED);
-            new UpdateAttributesProcessor(pr).distribute(false);
-            pr.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.DISK_STORE_FLUSHED);
+            partitionedRegion.setShutDownAllStatus(PartitionedRegion.DISK_STORE_FLUSHED);
+            new UpdateAttributesProcessor(partitionedRegion).distribute(false);
+            partitionedRegion.getRegionAdvisor()
+                .waitForProfileStatus(PartitionedRegion.DISK_STORE_FLUSHED);
             if (logger.isDebugEnabled()) {
-              logger.debug("shutDownAll: PR {}: all flush profiles received.", pr.getName());
+              logger.debug("shutDownAll: PR {}: all flush profiles received.",
+                  partitionedRegion.getName());
             }
           } // async write
 
@@ -1904,41 +1845,43 @@ public class GemFireCacheImpl
           // iterate through all the bucketMaps and exclude the items whose
           // idm is no longer online
           Set<InternalDistributedMember> membersToPersistOfflineEqual =
-              pr.getRegionAdvisor().adviseDataStore();
+              partitionedRegion.getRegionAdvisor().adviseDataStore();
           for (Map.Entry e : bucketEntries) {
-            BucketRegion br = (BucketRegion) e.getValue();
-            if (br == null || br.isDestroyed) {
+            BucketRegion bucket = (BucketRegion) e.getValue();
+            if (bucket == null || bucket.isDestroyed) {
               // bucket region could be destroyed in race condition
               continue;
             }
             Map<InternalDistributedMember, PersistentMemberID> persistMap =
-                getSubMapForLiveMembers(pr, membersToPersistOfflineEqual, bucketMaps[br.getId()]);
+                getSubMapForLiveMembers(membersToPersistOfflineEqual, bucketMaps[bucket.getId()]);
             if (persistMap != null) {
-              br.getPersistenceAdvisor().persistMembersOfflineAndEqual(persistMap);
+              bucket.getPersistenceAdvisor().persistMembersOfflineAndEqual(persistMap);
               if (logger.isDebugEnabled()) {
-                logger.debug("shutDownAll: PR {}: pesisting bucket {}:{}", pr.getName(), br.getId(),
-                    persistMap);
+                logger.debug("shutDownAll: PR {}: persisting bucket {}:{}",
+                    partitionedRegion.getName(), bucket.getId(), persistMap);
               }
             }
           }
 
-          // send persited profile update to other members, let all members to persist
+          // send persisted profile update to other members; let all members persist
           // before close the region
-          pr.setShutDownAllStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
-          new UpdateAttributesProcessor(pr).distribute(false);
-          pr.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
+          partitionedRegion.setShutDownAllStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
+          new UpdateAttributesProcessor(partitionedRegion).distribute(false);
+          partitionedRegion.getRegionAdvisor()
+              .waitForProfileStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
           if (logger.isDebugEnabled()) {
-            logger.debug("shutDownAll: PR {}: all offline_equal profiles received.", pr.getName());
+            logger.debug("shutDownAll: PR {}: all offline_equal profiles received.",
+                partitionedRegion.getName());
           }
-        } // datastore
+        } // dataStore
 
-        // after done all steps for buckets, close pr
+        // after all steps for the buckets are done, close partitionedRegion
         // close accessor directly
-        RegionEventImpl event =
-            new RegionEventImpl(pr, Operation.REGION_CLOSE, null, false, getMyId(), true);
+        RegionEventImpl event = new RegionEventImpl(partitionedRegion, Operation.REGION_CLOSE, null,
+            false, getMyId(), true);
         try {
           // not to acquire lock
-          pr.basicDestroyRegion(event, false, false, true);
+          partitionedRegion.basicDestroyRegion(event, false, false, true);
         } catch (CacheWriterException e) {
           // not possible with local operation, CacheWriter not called
           throw new Error(
@@ -1952,36 +1895,33 @@ public class GemFireCacheImpl
                   .toLocalizedString(),
               e);
         }
-        // pr.close();
       } // synchronized
     } catch (CacheClosedException cce) {
       logger.debug("Encounter CacheClosedException when shutDownAll is closing PR: {}:{}",
-          pr.getFullPath(), cce.getMessage());
+          partitionedRegion.getFullPath(), cce.getMessage());
     } catch (CancelException ce) {
       logger.debug("Encounter CancelException when shutDownAll is closing PR: {}:{}",
-          pr.getFullPath(), ce.getMessage());
+          partitionedRegion.getFullPath(), ce.getMessage());
     } catch (RegionDestroyedException rde) {
       logger.debug("Encounter CacheDestroyedException when shutDownAll is closing PR: {}:{}",
-          pr.getFullPath(), rde.getMessage());
+          partitionedRegion.getFullPath(), rde.getMessage());
     } finally {
       if (acquiredLock) {
-        pr.releaseDestroyLock();
+        partitionedRegion.releaseDestroyLock();
       }
     }
   }
 
-  private Map<InternalDistributedMember, PersistentMemberID> getSubMapForLiveMembers(
-      PartitionedRegion pr, Set<InternalDistributedMember> membersToPersistOfflineEqual,
+  private static Map<InternalDistributedMember, PersistentMemberID> getSubMapForLiveMembers(
+      Set<InternalDistributedMember> membersToPersistOfflineEqual,
       Map<InternalDistributedMember, PersistentMemberID> bucketMap) {
     if (bucketMap == null) {
       return null;
     }
-    Map<InternalDistributedMember, PersistentMemberID> persistMap = new HashMap();
-    Iterator itor = membersToPersistOfflineEqual.iterator();
-    while (itor.hasNext()) {
-      InternalDistributedMember idm = (InternalDistributedMember) itor.next();
-      if (bucketMap.containsKey(idm)) {
-        persistMap.put(idm, bucketMap.get(idm));
+    Map<InternalDistributedMember, PersistentMemberID> persistMap = new HashMap<>();
+    for (InternalDistributedMember member : membersToPersistOfflineEqual) {
+      if (bucketMap.containsKey(member)) {
+        persistMap.put(member, bucketMap.get(member));
       }
     }
     return persistMap;
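
    A standalone sketch of the "keep only the live members" filtering done here, using String
    keys and values in place of InternalDistributedMember and PersistentMemberID:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class LiveMemberFilterSketch {
      public static void main(String[] args) {
        Map<String, String> bucketMap = new HashMap<>();
        bucketMap.put("member-a", "persistent-id-1");
        bucketMap.put("member-b", "persistent-id-2");

        Set<String> liveMembers = new HashSet<>();
        liveMembers.add("member-a"); // member-b is treated as no longer online

        // Keep only the entries whose key is still in the live-member set.
        Map<String, String> persistMap = new HashMap<>();
        for (String member : liveMembers) {
          if (bucketMap.containsKey(member)) {
            persistMap.put(member, bucketMap.get(member));
          }
        }
        System.out.println(persistMap); // {member-a=persistent-id-1}
      }
    }
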
@@ -1992,13 +1932,13 @@ public class GemFireCacheImpl
     close(false);
   }
 
-  public void close(String reason, boolean keepalive, boolean keepDS) {
-    close(reason, null, keepalive, keepDS);
+  public void close(String reason, boolean keepAlive, boolean keepDS) {
+    close(reason, null, keepAlive, keepDS);
   }
 
   @Override
-  public void close(boolean keepalive) {
-    close("Normal disconnect", null, keepalive, false);
+  public void close(boolean keepAlive) {
+    close("Normal disconnect", null, keepAlive, false);
   }
 
   public void close(String reason, Throwable optionalCause) {
@@ -2098,7 +2038,7 @@ public class GemFireCacheImpl
 
   public OffHeapEvictor getOffHeapEvictor() {
     synchronized (this.offHeapEvictorLock) {
-      stopper.checkCancelInProgress(null);
+      this.stopper.checkCancelInProgress(null);
       if (this.offHeapEvictor == null) {
         this.offHeapEvictor = new OffHeapEvictor(this);
       }
@@ -2108,37 +2048,26 @@ public class GemFireCacheImpl
 
   @Override
   public PersistentMemberManager getPersistentMemberManager() {
-    return persistentMemberManager;
+    return this.persistentMemberManager;
   }
 
   @Override
   public ClientMetadataService getClientMetadataService() {
     synchronized (this.clientMetaDatServiceLock) {
-      stopper.checkCancelInProgress(null);
-      if (this.clientMetadatService == null) {
-        this.clientMetadatService = new ClientMetadataService(this);
+      this.stopper.checkCancelInProgress(null);
+      if (this.clientMetadataService == null) {
+        this.clientMetadataService = new ClientMetadataService(this);
       }
-      return this.clientMetadatService;
+      return this.clientMetadataService;
     }
   }
 
   private final boolean DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE = Boolean
       .getBoolean(DistributionConfig.GEMFIRE_PREFIX + "DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
 
-  /**
-   * close the cache
-   *
-   * @param reason the reason the cache is being closed
-   * @param systemFailureCause whether this member was ejected from the distributed system
-   * @param keepalive whoever added this should javadoc it
-   */
-  public void close(String reason, Throwable systemFailureCause, boolean keepalive) {
-    close(reason, systemFailureCause, keepalive, false);
-  }
-
-  public void close(String reason, Throwable systemFailureCause, boolean keepalive,
+  public void close(String reason, Throwable systemFailureCause, boolean keepAlive,
       boolean keepDS) {
-    securityService.close();
+    this.securityService.close();
 
     if (isClosed()) {
       return;
@@ -2146,7 +2075,7 @@ public class GemFireCacheImpl
     final boolean isDebugEnabled = logger.isDebugEnabled();
 
     synchronized (GemFireCacheImpl.class) {
-      // bugfix for bug 36512 "GemFireCache.close is not thread safe"
+      // fix for bug 36512 "GemFireCache.close is not thread safe"
       // ALL CODE FOR CLOSE SHOULD NOW BE UNDER STATIC SYNCHRONIZATION
       // OF synchronized (GemFireCache.class) {
       // static synchronization is necessary due to static resources
@@ -2154,14 +2083,14 @@ public class GemFireCacheImpl
         return;
       }
 
-      /**
+      /*
        * First close the ManagementService as it uses a lot of infra which will be closed by
        * cache.close()
-       **/
+       */
       system.handleResourceEvent(ResourceEvent.CACHE_REMOVE, this);
-      if (this.listener != null) {
-        this.system.removeResourceListener(listener);
-        this.listener = null;
+      if (this.resourceEventsListener != null) {
+        this.system.removeResourceListener(resourceEventsListener);
+        this.resourceEventsListener = null;
       }
 
       if (systemFailureCause != null) {
@@ -2173,7 +2102,7 @@ public class GemFireCacheImpl
         }
       }
 
-      this.keepAlive = keepalive;
+      this.keepAlive = keepAlive;
       isClosing = true;
       logger.info(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_NOW_CLOSING, this));
 
@@ -2191,12 +2120,12 @@ public class GemFireCacheImpl
       TXStateProxy tx = null;
       try {
 
-        if (this.txMgr != null) {
-          tx = this.txMgr.internalSuspend();
+        if (this.transactionManager != null) {
+          tx = this.transactionManager.internalSuspend();
         }
 
         // do this before closing regions
-        resourceManager.close();
+        this.resourceManager.close();
 
         try {
           this.resourceAdvisor.close();
@@ -2209,11 +2138,10 @@ public class GemFireCacheImpl
           // ignore
         }
 
-        GatewaySenderAdvisor advisor = null;
         for (GatewaySender sender : this.getAllGatewaySenders()) {
           try {
             sender.stop();
-            advisor = ((AbstractGatewaySender) sender).getSenderAdvisor();
+            GatewaySenderAdvisor advisor = ((AbstractGatewaySender) sender).getSenderAdvisor();
             if (advisor != null) {
               if (isDebugEnabled) {
                 logger.debug("Stopping the GatewaySender advisor");
@@ -2265,9 +2193,9 @@ public class GemFireCacheImpl
             GemFireCacheImpl.pdxInstance = null;
           }
 
-          List rootRegionValues = null;
+          List<LocalRegion> rootRegionValues;
           synchronized (this.rootRegions) {
-            rootRegionValues = new ArrayList(this.rootRegions.values());
+            rootRegionValues = new ArrayList<>(this.rootRegions.values());
           }
           {
             final Operation op;
@@ -2281,8 +2209,7 @@ public class GemFireCacheImpl
 
             LocalRegion prRoot = null;
 
-            for (Iterator itr = rootRegionValues.iterator(); itr.hasNext();) {
-              LocalRegion lr = (LocalRegion) itr.next();
+            for (LocalRegion lr : rootRegionValues) {
               if (isDebugEnabled) {
                 logger.debug("{}: processing region {}", this, lr.getFullPath());
               }
@@ -2338,7 +2265,7 @@ public class GemFireCacheImpl
                 LocalizedStrings.GemFireCache_FAILED_TO_GET_THE_CQSERVICE_TO_CLOSE_DURING_CACHE_CLOSE_1));
           }
 
-          PoolManager.close(keepalive);
+          PoolManager.close(keepAlive);
 
           if (isDebugEnabled) {
             logger.debug("{}: notifying admins of close...", this);
@@ -2360,7 +2287,7 @@ public class GemFireCacheImpl
           DM dm = null;
           try {
             dm = system.getDistributionManager();
-            dm.removeMembershipListener(this.txMgr);
+            dm.removeMembershipListener(this.transactionManager);
           } catch (CancelException e) {
             // dm = null;
           }
@@ -2390,7 +2317,7 @@ public class GemFireCacheImpl
           // NO MORE Distributed Messaging AFTER THIS POINT!!!!
 
           {
-            ClientMetadataService cms = this.clientMetadatService;
+            ClientMetadataService cms = this.clientMetadataService;
             if (cms != null) {
               cms.close();
             }
@@ -2403,20 +2330,6 @@ public class GemFireCacheImpl
           // make sure the disk stores get closed
           closeDiskStores();
           // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
-
-          // okay, we're taking too long to do this stuff, so let's
-          // be mean to other processes and skip the rest of the messaging
-          // phase
-          // [bruce] the following code is unnecessary since someone put the
-          // same actions in a finally block
-          // if (!this.closed) {
-          // this.closed = true;
-          // this.txMgr.close();
-          // if (GemFireCache.instance == this) {
-          // GemFireCache.instance = null;
-          // }
-          // ((DynamicRegionFactoryImpl)DynamicRegionFactory.get()).close();
-          // }
         }
 
         // Close the CqService Handle.
@@ -2448,12 +2361,12 @@ public class GemFireCacheImpl
 
       } finally {
         // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
-        if (this.txMgr != null) {
-          this.txMgr.close();
+        if (this.transactionManager != null) {
+          this.transactionManager.close();
         }
         ((DynamicRegionFactoryImpl) DynamicRegionFactory.get()).close();
-        if (this.txMgr != null) {
-          this.txMgr.internalResume(tx);
+        if (this.transactionManager != null) {
+          this.transactionManager.internalResume(tx);
         }
         TXCommitMessage.getTracker().clearForCacheClose();
       }
@@ -2470,8 +2383,7 @@ public class GemFireCacheImpl
       // do this late to prevent 43412
       TypeRegistry.setPdxSerializer(null);
 
-      for (Iterator iter = cacheLifecycleListeners.iterator(); iter.hasNext();) {
-        CacheLifecycleListener listener = (CacheLifecycleListener) iter.next();
+      for (CacheLifecycleListener listener : cacheLifecycleListeners) {
         listener.cacheClosed(this);
       }
       // Fix for #49856
@@ -2482,13 +2394,11 @@ public class GemFireCacheImpl
 
   }
 
-  // see Cache.isReconnecting()
   @Override
   public boolean isReconnecting() {
     return this.system.isReconnecting();
   }
 
-  // see Cache.waitUntilReconnected(long, TimeUnit)
   @Override
   public boolean waitUntilReconnected(long time, TimeUnit units) throws InterruptedException {
     boolean systemReconnected = this.system.waitUntilReconnected(time, units);
@@ -2496,10 +2406,7 @@ public class GemFireCacheImpl
       return false;
     }
     GemFireCacheImpl cache = getInstance();
-    if (cache == null || !cache.isInitialized()) {
-      return false;
-    }
-    return true;
+    return cache != null && cache.isInitialized();
   }
 
   @Override
@@ -2509,14 +2416,14 @@ public class GemFireCacheImpl
 
   @Override
   public Cache getReconnectedCache() {
-    GemFireCacheImpl c = GemFireCacheImpl.getInstance();
-    if (c == null) {
+    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+    if (cache == null) {
       return null;
     }
-    if (c == this || !c.isInitialized()) {
-      c = null;
+    if (cache == this || !cache.isInitialized()) {
+      cache = null;
     }
-    return c;
+    return cache;
   }
 
   private void stopMemcachedServer() {
@@ -2544,16 +2451,16 @@ public class GemFireCacheImpl
 
   private void prepareDiskStoresForClose() {
     String pdxDSName = TypeRegistry.getPdxDiskStoreName(this);
-    DiskStoreImpl pdxdsi = null;
+    DiskStoreImpl pdxDiskStore = null;
     for (DiskStoreImpl dsi : this.diskStores.values()) {
       if (dsi.getName().equals(pdxDSName)) {
-        pdxdsi = dsi;
+        pdxDiskStore = dsi;
       } else {
         dsi.prepareForClose();
       }
     }
-    if (pdxdsi != null) {
-      pdxdsi.prepareForClose();
+    if (pdxDiskStore != null) {
+      pdxDiskStore.prepareForClose();
     }
   }
 
@@ -2561,48 +2468,33 @@ public class GemFireCacheImpl
    * Used to guard access to compactorPool and set to true when cache is shutdown.
    */
   private final AtomicBoolean diskStoreTaskSync = new AtomicBoolean(false);
+
   /**
-   * Lazily initialized.
+   * Lazily initialized. TODO: this is always null
    */
   private ThreadPoolExecutor diskStoreTaskPool = null;
 
-  private void createDiskStoreTaskPool() {
-    int MAXT = DiskStoreImpl.MAX_CONCURRENT_COMPACTIONS;
-    final ThreadGroup compactThreadGroup =
-        LoggingThreadGroup.createThreadGroup("Oplog Compactor Thread Group", logger);
-    /*
-     * final ThreadFactory compactThreadFactory = new ThreadFactory() { public Thread
-     * newThread(Runnable command) { Thread thread = new Thread(compactThreadGroup, command,
-     * "Idle OplogCompactor"); thread.setDaemon(true); return thread; } };
-     */
+  private final ConcurrentMap<String, DiskStoreImpl> diskStores = new ConcurrentHashMap<>();
 
-    final ThreadFactory compactThreadFactory =
-        GemfireCacheHelper.CreateThreadFactory(compactThreadGroup, "Idle OplogCompactor");
-    this.diskStoreTaskPool = new ThreadPoolExecutor(MAXT, MAXT, 1, TimeUnit.SECONDS,
-        new LinkedBlockingQueue(), compactThreadFactory);
-  }
-
-  private final ConcurrentMap<String, DiskStoreImpl> diskStores =
-      new ConcurrentHashMap<String, DiskStoreImpl>();
   private final ConcurrentMap<String, DiskStoreImpl> regionOwnedDiskStores =
-      new ConcurrentHashMap<String, DiskStoreImpl>();
+      new ConcurrentHashMap<>();
 
-  public void addDiskStore(DiskStoreImpl dsi) {
+  void addDiskStore(DiskStoreImpl dsi) {
     this.diskStores.put(dsi.getName(), dsi);
     if (!dsi.isOffline()) {
       getDiskStoreMonitor().addDiskStore(dsi);
     }
   }
 
-  public void removeDiskStore(DiskStoreImpl dsi) {
+  void removeDiskStore(DiskStoreImpl dsi) {
     this.diskStores.remove(dsi.getName());
     this.regionOwnedDiskStores.remove(dsi.getName());
-    /** Added for M&M **/
+    // Added for M&M
     if (!dsi.getOwnedByRegion())
       system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
   }
 
-  public void addRegionOwnedDiskStore(DiskStoreImpl dsi) {
+  void addRegionOwnedDiskStore(DiskStoreImpl dsi) {
     this.regionOwnedDiskStores.put(dsi.getName(), dsi);
     if (!dsi.isOffline()) {
       getDiskStoreMonitor().addDiskStore(dsi);
@@ -2618,7 +2510,7 @@ public class GemFireCacheImpl
           logger.debug("closing {}", dsi);
         }
         dsi.close();
-        /** Added for M&M **/
+        // Added for M&M
         system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
       } catch (Exception e) {
         logger.fatal(
@@ -2635,13 +2527,6 @@ public class GemFireCacheImpl
     DEFAULT_DS_NAME = dsName;
   }
 
-  /**
-   * Used by unit tests to undo a change to the default disk store name.
-   */
-  public static void unsetDefaultDiskStoreName() {
-    DEFAULT_DS_NAME = DiskStoreFactory.DEFAULT_DISK_STORE_NAME;
-  }
-
   public static String getDefaultDiskStoreName() {
     return DEFAULT_DS_NAME;
   }
@@ -2687,138 +2572,49 @@ public class GemFireCacheImpl
 
   @Override
   public Collection<DiskStoreImpl> listDiskStoresIncludingRegionO

<TRUNCATED>