Posted to commits@geode.apache.org by kl...@apache.org on 2016/06/02 05:16:21 UTC

[1/7] incubator-geode git commit: Change AssertionFailedError to AssertionError. General cleanup.

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-837 8b88a7464 -> bb91dedc4


Change AssertionFailedError to AssertionError. General cleanup.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/243a5c71
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/243a5c71
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/243a5c71

Branch: refs/heads/feature/GEODE-837
Commit: 243a5c71dace63feda4a0cb8e9c128f8e031df92
Parents: 8b88a74
Author: Kirk Lund <kl...@apache.org>
Authored: Wed Jun 1 21:54:42 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Jun 1 21:54:42 2016 -0700

----------------------------------------------------------------------
 .../cache30/CacheSerializableRunnable.java      | 28 ++++++--------------
 1 file changed, 8 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/243a5c71/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheSerializableRunnable.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheSerializableRunnable.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheSerializableRunnable.java
index 690bac2..78179cf 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheSerializableRunnable.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheSerializableRunnable.java
@@ -22,21 +22,15 @@ import com.gemstone.gemfire.cache.CacheRuntimeException;
 import com.gemstone.gemfire.test.dunit.RepeatableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
-import junit.framework.AssertionFailedError;
-
 /**
  * A helper class that provides the {@link SerializableRunnable}
  * class, but uses a {@link #run2} method instead that throws {@link
  * CacheException}.  This way, we don't need to have a lot of
  * try/catch code in the tests.
  *
- *
  * @since 3.0
  */
-public abstract class CacheSerializableRunnable
-  extends SerializableRunnable 
-  implements RepeatableRunnable 
-{
+public abstract class CacheSerializableRunnable extends SerializableRunnable implements RepeatableRunnable {
 
   /**
    * Creates a new <code>CacheSerializableRunnable</code> with the
@@ -50,7 +44,7 @@ public abstract class CacheSerializableRunnable
    * Creates a new <code>CacheSerializableRunnable</code> with the
    * given name
    */
-  public CacheSerializableRunnable(String name,Object[] args) {
+  public CacheSerializableRunnable(String name, Object[] args) {
     super(name);
     this.args = args;
   }
@@ -74,21 +68,21 @@ public abstract class CacheSerializableRunnable
   }
   
   /**
-   * Invokes the {@link #run} method.  If AssertionFailedError is thrown,
+   * Invokes the {@link #run} method.  If AssertionError is thrown,
    * and repeatTimeoutMs is >0, then repeat the {@link #run} method until
    * it either succeeds or repeatTimeoutMs milliseconds have passed.  The
-   * AssertionFailedError is only thrown to the caller if the last run
+   * AssertionError is only thrown to the caller if the last run
    * still throws it.
    */
   public final void runRepeatingIfNecessary(long repeatTimeoutMs) {
     long start = System.currentTimeMillis();
-    AssertionFailedError lastErr = null;
+    AssertionError lastErr = null;
     do {
       try {
         lastErr = null;
         this.run();
         CacheFactory.getAnyInstance().getLogger().fine("Completed " + this);
-      } catch (AssertionFailedError err) {
+      } catch (AssertionError err) {
         CacheFactory.getAnyInstance().getLogger().fine("Repeating " + this);
         lastErr = err;
         try {
@@ -109,19 +103,13 @@ public abstract class CacheSerializableRunnable
 
   public void run3() throws CacheException{}
 
-  /////////////////////////  Inner Classes  /////////////////////////
-
   /**
    * An exception that wraps a {@link CacheException}
    */
-  public static class CacheSerializableRunnableException 
-    extends CacheRuntimeException {
+  public static class CacheSerializableRunnableException extends CacheRuntimeException {
 
-    public CacheSerializableRunnableException(String message,
-                                              Throwable cause) {
+    public CacheSerializableRunnableException(String message, Throwable cause) {
       super(message, cause);
     }
-
   }
-
 }
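For readers less familiar with this helper: tests typically use CacheSerializableRunnable as an anonymous subclass that overrides run2(), letting any CacheException propagate instead of wrapping it in try/catch, and call runRepeatingIfNecessary() when an assertion may take a while to become true. A minimal usage sketch follows (hypothetical test code, not part of this commit; the region name and assertion are made up for illustration):

import static org.junit.Assert.assertEquals;

import com.gemstone.gemfire.cache.CacheException;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache30.CacheSerializableRunnable;

public class CacheSerializableRunnableSketch {

  // Builds a task whose run2() may throw CacheException directly,
  // so the test body needs no try/catch boilerplate.
  static CacheSerializableRunnable verifyEntry(final Region region) {
    return new CacheSerializableRunnable("verify entry") {
      @Override
      public void run2() throws CacheException {
        assertEquals("value", region.get("key"));
      }
    };
  }

  static void example(Region region) {
    // Retries run() whenever it throws AssertionError, until it passes or
    // 30 seconds elapse; the last AssertionError is rethrown if it never passes.
    verifyEntry(region).runRepeatingIfNecessary(30 * 1000);
  }
}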


[4/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
index e4aa706..391aed0 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.File;
@@ -25,13 +27,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.Region;
@@ -45,33 +43,24 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * @since 5.1
  */
 @Category(IntegrationTest.class)
-public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
-{
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-  
-  private static int EMPTY_RVV_SIZE = 6;
-
-//  private static final boolean debug = false;
-
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
-    diskProps.setDiskDirs(dirs);
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase {
 
+  private static int EMPTY_RVV_SIZE = 6;
   private static int ENTRY_SIZE = 1024;
 
   private static boolean oplogsIDsNotifiedToRoll;
+
+  private boolean proceedWithRolling;
+  private boolean rollingDone;
+  private boolean verifiedOplogs;
+  private final Object verifiedSync = new Object();
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
   
-  boolean proceedWithRolling, rollingDone, verifiedOplogs;
-  final Object verifiedSync = new Object();
+  @Override
+  protected final void postSetUp() throws Exception {
+    diskProps.setDiskDirs(dirs);
+  }
 
   /**
    * Disk region recovery test for Persist only with sync writes. Test has four
@@ -82,11 +71,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * 1. Get and verify the entries put in STEP 1 and STEP 2. STEP 4: Create
    * cache. Create Region with the same name as that of in STEP 1. Get and
    * verify the entries put in STEP 1 and STEP 2.
-   * 
    */
   @Test
-  public void testDiskRegRecovery()
-  {
+  public void testDiskRegRecovery() {
     /**
      * STEP 1
      */
@@ -310,21 +297,18 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     getByteArrValZeroLnth("119", region);
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
 
-/**
+  /**
    * Disk region recovery test for Persist only with sync writes. Test has four
    * steps : STEP 1: Create cache. Create Region. Put entries. Close cache. STEP
    * 2: Create cache. Create Region with the same name as that of in STEP 1. Delete some entries.
    * Close the Cache   * 
    * 3: Again Create cache. Create Region with the same name as that of in STEP
    * 4) Verify that the entries got deleted
-   * 
    */
   @Test
-  public void testBug39989_1()
-  {
+  public void testBug39989_1() {
     /**
      * STEP 1
      */
@@ -409,9 +393,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     }   
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
-  
 
   /**
    * Disk region recovery test for Persist only with sync writes. Test has four
@@ -421,11 +403,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * Close the Cache   * 
    * 3: Again Create cache.
    * 4) check if the region creation is successful
-   * 
    */
   @Test
-  public void testBug39989_2()
-  {
+  public void testBug39989_2() {
     /**
      * STEP 1
      */
@@ -516,16 +496,12 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     }   
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
 
   /**
    * To validate the get operation performed on a byte array.
-   *  
    */
-
-  private void getByteArrVal(String key, Region region)
-  {
+  private void getByteArrVal(String key, Region region) {
     byte[] val = (byte[])region.get(key);
     //verify that the retrieved byte[] equals to the value put initially.
     // val should be an unitialized array of bytes of length 1024
@@ -537,10 +513,8 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
 
   /**
    * to validate the get operation performed on a byte array of length zero
-   *  
    */
-  private boolean getByteArrValZeroLnth(String key, Region region)
-  {
+  private boolean getByteArrValZeroLnth(String key, Region region) {
     Object val0 = null;
     byte[] val2 = new byte[0];
     try {
@@ -567,14 +541,12 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     return result;
   }
 
-  public void verifyOplogSizeZeroAfterRecovery(Region region)
-  {
+  private void verifyOplogSizeZeroAfterRecovery(Region region) {
     assertEquals(Oplog.OPLOG_MAGIC_SEQ_REC_SIZE*2 + Oplog.OPLOG_DISK_STORE_REC_SIZE*2 + EMPTY_RVV_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE*2, ((LocalRegion)region).getDiskRegion().testHook_getChild().getOplogSize());
   }
 
   @Test
-  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesTrue()
-  {
+  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesTrue() {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -628,8 +600,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesFalse()
-  {
+  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesFalse() {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -688,8 +659,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testEmptyRegionRecover()
-  {
+  public void testEmptyRegionRecover() {
     diskProps.setDiskDirs(dirs);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
     Assert.assertTrue(region.size() == 0);
@@ -714,8 +684,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testReadCorruptedFile()
-  {
+  public void testReadCorruptedFile() {
     diskProps.setDiskDirs(dirs);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
 
@@ -765,8 +734,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testForceCompactionForRegionWithRollingDisabled()
-      throws Exception {
+  public void testForceCompactionForRegionWithRollingDisabled() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setMaxOplogSize(2048+(18*2)+15*7);
     diskProps.setRolling(false);
@@ -837,12 +805,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * leaves it in created set & so when the compactor processes the created Set it 
    * thinks that the entry is now referenced in the any of the subsequent oplogs & thus
    * overwrites it with a byte[].
-   * @throws Exception
    */
-  
   @Test
-  public void testVestigialCreatesInOplog() throws Exception
-  {
+  public void testVestigialCreatesInOplog() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setMaxOplogSize(40);
     diskProps.setPersistBackup(true);
@@ -907,9 +872,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOnlyRecoverValuesTrue()
-      throws Exception
-  {
+  public void testDiskIDFieldsForPersistOnlyRecoverValuesTrue() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -985,13 +948,10 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
         System.clearProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
       }
     }
-
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOverFlowRecoverValuesTrue()
-      throws Exception
-  {
+  public void testDiskIDFieldsForPersistOverFlowRecoverValuesTrue() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -1067,8 +1027,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOnlyRecoverValuesFalse()
-      throws Exception {
+  public void testDiskIDFieldsForPersistOnlyRecoverValuesFalse() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -1145,8 +1104,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
  
   @Test
-  public void testDiskIDFieldsForPersistOverFlowRecoverValuesFalse()
-      throws Exception {
+  public void testDiskIDFieldsForPersistOverFlowRecoverValuesFalse() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -1224,7 +1182,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
 
   @Test
   public void testBug40375() throws Exception {
-      try {
+    try {
       diskProps.setDiskDirs(dirs);
       diskProps.setPersistBackup(true);
       diskProps.setSynchronous(true);
@@ -1249,66 +1207,65 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
       assertEquals(4, region.size());
       region.close();
-      }finally {
-
+    } finally {
       System.setProperty(DiskStoreImpl.COMPLETE_COMPACTION_BEFORE_TERMINATION_PROPERTY_NAME,"");
-        }
+    }
   }
      
   @Test
   public void testBug41340() throws Exception {
-       diskProps.setDiskDirs(dirs);
-       diskProps.setPersistBackup(true);
-       diskProps.setSynchronous(true);
-       diskProps.setRolling(true);
-       diskProps.setRegionName("testBug41340");
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       assertEquals(0, region.size());
-       //put some entries
-       region.put("0","0");
-       region.put("1","1");
-       region.put("2","2");
-       region.put("3","3");
-       
-       
-       //Create another oplog
-       DiskStore store = cache.findDiskStore(region.getAttributes().getDiskStoreName());
-       store.forceRoll();
-       
-       //Now create and destroy all of the entries in the new
-       //oplog. This should cause us to remove the CRF but leave
-       //the DRF, which has creates in reverse order. Now we have
-       //garbage destroys which have higher IDs than any crate
-       region.put("4","1");
-       region.put("5","2");
-       region.put("6","3");
-       region.destroy("0");
-       region.destroy("6");
-       region.destroy("5");
-       region.destroy("4");
-       
-       store.forceRoll();
-       
-       //Force a recovery
-       GemFireCacheImpl.getInstance().close();
-       cache = createCache();
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       assertEquals(3, region.size());
-       
-       //With bug 41340, this is reusing an oplog id.
-       region.put("7","7");
+     diskProps.setDiskDirs(dirs);
+     diskProps.setPersistBackup(true);
+     diskProps.setSynchronous(true);
+     diskProps.setRolling(true);
+     diskProps.setRegionName("testBug41340");
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+     assertEquals(0, region.size());
+     //put some entries
+     region.put("0","0");
+     region.put("1","1");
+     region.put("2","2");
+     region.put("3","3");
+
+
+     //Create another oplog
+     DiskStore store = cache.findDiskStore(region.getAttributes().getDiskStoreName());
+     store.forceRoll();
+
+     //Now create and destroy all of the entries in the new
+     //oplog. This should cause us to remove the CRF but leave
+     //the DRF, which has creates in reverse order. Now we have
+     //garbage destroys which have higher IDs than any create
+     region.put("4","1");
+     region.put("5","2");
+     region.put("6","3");
+     region.destroy("0");
+     region.destroy("6");
+     region.destroy("5");
+     region.destroy("4");
+
+     store.forceRoll();
+
+     //Force a recovery
+     GemFireCacheImpl.getInstance().close();
+     cache = createCache();
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+     assertEquals(3, region.size());
+
+     //With bug 41340, this is reusing an oplog id.
+     region.put("7","7");
 //       region.close();
-       
-       //Force another recovery
-       GemFireCacheImpl.getInstance().close();
-       cache = createCache();
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       
-       //Make sure we didn't lose the entry
-       assertEquals(4, region.size());
-       assertEquals("7", region.get("7"));
-       region.close();
-     }
+
+     //Force another recovery
+     GemFireCacheImpl.getInstance().close();
+     cache = createCache();
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+
+     //Make sure we didn't lose the entry
+     assertEquals(4, region.size());
+     assertEquals("7", region.get("7"));
+     region.close();
+   }
   
   @Test
   public void testRecoverValuesFalse() {
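A pattern worth noting in the hunks above: the removed @Before setUp()/@After tearDown() overrides (which each had to remember to call super) are replaced by postSetUp()/postTearDown() hooks. A rough sketch of the template-method shape this implies for DiskRegionTestingBase (the base-class body below is an assumption for illustration, not the actual class):

import org.junit.After;
import org.junit.Before;

public abstract class LifecycleHooksSketch {

  @Before
  public final void setUp() throws Exception {
    // ... shared cache/disk-directory setup would go here ...
    postSetUp();    // subclass hook; there is no super.setUp() call to forget
  }

  @After
  public final void tearDown() throws Exception {
    postTearDown(); // subclass hook runs before the shared cleanup
    // ... shared cleanup would go here ...
  }

  protected void postSetUp() throws Exception {
    // default no-op; e.g. DiskRegRecoveryJUnitTest overrides this to set disk dirs
  }

  protected void postTearDown() throws Exception {
    // default no-op
  }
}

If the base setUp()/tearDown() are final as sketched, subclasses are pushed onto the hooks, which is why the boilerplate overrides could simply be deleted.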

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
index 350a588..84c8500 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
@@ -16,10 +16,7 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -28,7 +25,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -37,15 +33,11 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.internal.FileUtil;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
-/**
- *
- */
 @Category(IntegrationTest.class)
 public class DiskRegionAsyncRecoveryJUnitTest extends DiskRegionTestingBase {
 
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     DiskStoreObserver.setInstance(null);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
index 07bd273..51352d1 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
@@ -16,11 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -32,47 +29,33 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * if the the region attributes are changed after starting it again.
  * 
  * The behaviour should be predictable
- * 
- *
  */
 @Category(IntegrationTest.class)
-public class DiskRegionChangingRegionAttributesJUnitTest extends
-    DiskRegionTestingBase
-{
+public class DiskRegionChangingRegionAttributesJUnitTest extends DiskRegionTestingBase {
+
+  private DiskRegionProperties props;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     props = new DiskRegionProperties();
     props.setDiskDirs(dirs);
-    
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
   }
-  
 
-  private DiskRegionProperties props;
-  
-  private void createOverflowOnly(){
+  private void createOverflowOnly() {
     props.setOverFlowCapacity(1);
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,props);
   }
   
-  private void createPersistOnly(){
+  private void createPersistOnly() {
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,props, Scope.LOCAL);
   }
   
-  private void createPersistAndOverflow(){
+  private void createPersistAndOverflow() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,props); 
   }
   
   @Test
-  public void testOverflowOnlyAndThenPersistOnly(){
+  public void testOverflowOnlyAndThenPersistOnly() {
     createOverflowOnly();
     put100Int();
     region.close();
@@ -81,7 +64,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistOnlyAndThenOverflowOnly(){
+  public void testPersistOnlyAndThenOverflowOnly() {
     createPersistOnly();
     put100Int();
     region.close();
@@ -97,7 +80,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testOverflowOnlyAndThenPeristAndOverflow(){
+  public void testOverflowOnlyAndThenPeristAndOverflow() {
     createOverflowOnly();
     put100Int();
     region.close();
@@ -106,7 +89,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistAndOverflowAndThenOverflowOnly(){
+  public void testPersistAndOverflowAndThenOverflowOnly() {
     createPersistAndOverflow();
     put100Int();
     region.close();
@@ -119,7 +102,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
  @Test
-  public void testPersistOnlyAndThenPeristAndOverflow(){
+  public void testPersistOnlyAndThenPeristAndOverflow() {
    createPersistOnly();
    put100Int();
    region.close();
@@ -128,15 +111,11 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistAndOverflowAndThenPersistOnly(){
+  public void testPersistAndOverflowAndThenPersistOnly() {
     createPersistAndOverflow();
     put100Int();
     region.close();
     createPersistOnly();
     assertTrue(region.size()==100);
   }
-  
-  
-  
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
index 4d88f43..009e4df 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
@@ -16,25 +16,23 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.lang.reflect.Array;
 import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
 
+import junit.framework.Assert;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-import junit.framework.Assert;
-
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.DiskStore;
@@ -55,44 +53,71 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 /**
  * TODO: fails when running integrationTest from gradle command-line on Windows 7
  * 
-com.gemstone.gemfire.internal.cache.DiskRegionJUnitTest > testAssertionErrorIfMissingOplog FAILED
-    junit.framework.AssertionFailedError
-        at junit.framework.Assert.fail(Assert.java:55)
-        at junit.framework.Assert.assertTrue(Assert.java:22)
-        at junit.framework.Assert.assertTrue(Assert.java:31)
-        at com.gemstone.gemfire.internal.cache.DiskRegionJUnitTest.testAssertionErrorIfMissingOplog(DiskRegionJUnitTest.java:2630)
- * 
  * JUnit tests covering some miscellaneous functionalites of Disk Region.
  */
 @Category(IntegrationTest.class)
-public class DiskRegionJUnitTest extends DiskRegionTestingBase
-{
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+public class DiskRegionJUnitTest extends DiskRegionTestingBase {
+
+  private static volatile boolean hasNotified = false;
+  private static volatile boolean putsHaveStarted = false;
+
+  private volatile boolean exceptionOccured = false;
+  private volatile boolean finished = false;
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
+
+  private DiskRegionProperties diskProps1 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps2 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps3 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps4 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps5 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps6 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps7 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps8 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps9 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps10 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps11 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps12 = new DiskRegionProperties();
+
+  private Region region1;
+  private Region region2;
+  private Region region3;
+  private Region region4;
+  private Region region5;
+  private Region region6;
+  private Region region7;
+  private Region region8;
+  private Region region9;
+  private Region region10;
+  private Region region11;
+  private Region region12;
+
+  private boolean failed = false;
+
+  private int counter = 0;
+  private boolean hasBeenNotified = false;
+
+  @Override
+  protected final void postSetUp() throws Exception {
     this.exceptionOccured = false;
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
   }
 
   private static class MyCL extends CacheListenerAdapter {
     public EntryEvent lastEvent;
+    @Override
     public void afterDestroy(EntryEvent event) {
       this.lastEvent = event;
     }
   }
 
   @Test
-  public void testRemoveCorrectlyRecorded() {
+  public void testRemoveCorrectlyRecorded() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
     props.setOverFlowCapacity(1);
@@ -105,12 +130,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
     MyCL cacheListener = new MyCL();
     region.getAttributesMutator().addCacheListener(cacheListener);
-    try {
-      region.destroy("1");
-    }
-    catch (Exception e) {
-      fail("Exception not expected but did occur due to "+e);
-    }
+    region.destroy("1");
 
     // Make sure we don't get an old value when doing a destroy
     // of an entry that overflowed to disk.
@@ -131,7 +151,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       exceptionOccured = true;
     }
     
-    if(!exceptionOccured){
+    if (!exceptionOccured){
       fail("exception did not occur although was supposed to occur");
     }
 
@@ -140,20 +160,14 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     
     Assert.assertTrue(region.get("1")==null);
     region.destroyRegion();
-    
   }
-  
-  
-  
+
   /**
    * Tests if region overflows correctly and stats are create and updated
    * correctly.
-   *  
    */
   @Test
-  public void testDiskRegionOverflow()
-  {
-
+  public void testDiskRegionOverflow() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
     props.setOverFlowCapacity(100);
@@ -210,8 +224,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     }
   }
 
-  public void assertArrayEquals(Object expected, Object v)
-  {
+  private void assertArrayEquals(Object expected, Object v) {
     assertEquals(expected.getClass(), v.getClass());
     int vLength = Array.getLength(v);
     assertEquals(Array.getLength(expected), vLength);
@@ -223,12 +236,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   /**
    * test method for putting different objects and validating that they have
    * been correctly put
-   *  
    */
   @Test
-  public void testDifferentObjectTypePuts()
-  {
-
+  public void testDifferentObjectTypePuts() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
     props.setOverFlowCapacity(100);
@@ -244,33 +254,29 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
         region.put(s, s);
       }
       region.put("foobar", "junk");
-      try {
-        region.localDestroy("foobar");
-
-        region.put("foobar2", "junk");
-        dr.flushForTesting();
-        region.localDestroy("foobar2");
-        // test invalidate
-        region.put("invalid", "invalid");
-        dr.flushForTesting();
-        region.invalidate("invalid");
-        dr.flushForTesting();
-        assertTrue(region.containsKey("invalid")
-            && !region.containsValueForKey("invalid"));
-        total++;
-        // test local-invalidate
-        region.put("localinvalid", "localinvalid");
-        dr.flushForTesting();
-        region.localInvalidate("localinvalid");
-        dr.flushForTesting();
-        assertTrue(region.containsKey("localinvalid")
-            && !region.containsValueForKey("localinvalid"));
-        total++;
-      }
-      catch (EntryNotFoundException e) {
-        logWriter.error("Exception occured", e);
-        fail(" Entry not found although was expected to be there");
-      }
+
+      region.localDestroy("foobar");
+
+      region.put("foobar2", "junk");
+      dr.flushForTesting();
+      region.localDestroy("foobar2");
+      // test invalidate
+      region.put("invalid", "invalid");
+      dr.flushForTesting();
+      region.invalidate("invalid");
+      dr.flushForTesting();
+      assertTrue(region.containsKey("invalid")
+          && !region.containsValueForKey("invalid"));
+      total++;
+      // test local-invalidate
+      region.put("localinvalid", "localinvalid");
+      dr.flushForTesting();
+      region.localInvalidate("localinvalid");
+      dr.flushForTesting();
+      assertTrue(region.containsKey("localinvalid")
+          && !region.containsValueForKey("localinvalid"));
+      total++;
+
       // test byte[] values
       region.put("byteArray", new byte[0]);
       dr.flushForTesting();
@@ -286,13 +292,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       assertEquals(total, region.size());
     }
     cache.close();
-    try {
-      cache = createCache();
-    }
-    catch (Exception e) {
-      logWriter.error("Exception occured", e);
-      fail("Exception in trying to create a cache due to " + e);
-    }
+    cache = createCache();
     {
       Region region = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(
           cache, props);
@@ -300,30 +300,23 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       assertEquals(true, region.containsKey("invalid"));
       assertEquals(null, region.get("invalid"));
       assertEquals(false, region.containsValueForKey("invalid"));
-      try {
-        region.localDestroy("invalid");
-        total--;
-        assertTrue(region.containsKey("localinvalid")
-            && !region.containsValueForKey("localinvalid"));
-        region.localDestroy("localinvalid");
-        total--;
-        assertArrayEquals(new byte[0], region.get("byteArray"));
-        region.localDestroy("byteArray");
-        total--;
-        assertEquals("modified", region.get("modified"));
-        region.localDestroy("modified");
-        total--;
-      }
-      catch (EntryNotFoundException e) {
-        logWriter.error("Exception occured", e);
-        fail(" Entry not found although was expected to be there");
-      }
+
+      region.localDestroy("invalid");
+      total--;
+      assertTrue(region.containsKey("localinvalid")
+          && !region.containsValueForKey("localinvalid"));
+      region.localDestroy("localinvalid");
+      total--;
+      assertArrayEquals(new byte[0], region.get("byteArray"));
+      region.localDestroy("byteArray");
+      total--;
+      assertEquals("modified", region.get("modified"));
+      region.localDestroy("modified");
+      total--;
     }
   }
 
-
-  class DoesPut implements Runnable
-  {
+  private static class DoesPut implements Runnable {
 
     private Region region;
 
@@ -331,15 +324,14 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       region.put(new Integer(1), new Integer(2));
     }
 
   }
 
-  class DoesGet implements Runnable
-  {
+  private class DoesGet implements Runnable {
 
     private final Region region;
 
@@ -347,8 +339,8 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       synchronized (this.region) {
         if (!hasNotified) {
           try {
@@ -373,12 +365,8 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     } // run()
   }
 
-  boolean failed = false;
-  static volatile boolean hasNotified = false;
-
   @Test
-  public void testFaultingInRemovalFromAsyncBuffer()
-  {
+  public void testFaultingInRemovalFromAsyncBuffer() throws Exception {
     failed = false;
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
@@ -426,16 +414,8 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
   }
 
-  protected int counter = 0;
-
-  protected boolean hasBeenNotified = false;
-
-  volatile boolean finished = false;
-  
   @Test
-  public void testGetWhileRolling()
-  {
-
+  public void testGetWhileRolling() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
     props.setOverFlowCapacity(1);
@@ -446,8 +426,8 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
         cache, props);
 
     CacheObserverHolder.setInstance(new CacheObserverAdapter() {
-      public void beforeGoingToCompact()
-      {
+      @Override
+      public void beforeGoingToCompact() {
         synchronized (region) {
           region.notifyAll();
           hasBeenNotified = true;
@@ -456,30 +436,12 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     });
 
     Runnable get = new Runnable() {
-      public void run()
-      {
+      @Override
+      public void run() {
         int localCounter = 0;
         synchronized (region) {
           localCounter = counter;
           counter++;
-//          try {
-//            if (!hasBeenNotified) {
-//              long startTime = System.currentTimeMillis();
-//              region.wait(24000);
-//              long interval = System.currentTimeMillis() - startTime;
-//              if (interval > 24000) {
-//                failed = true;
-//                fail("Getter #" + localCounter + " took too long in going to join, it should have exited before 24000 ms");
-//              }
-//            }
-//
-//          }
-//          catch (InterruptedException e) {
-//            if (finished) {
-//              return;
-//            }
-//            fail("interrupted");
-//          }
         }
         int limit = ((localCounter * 1000) + 1000);
         for (int i = localCounter * 1000; i < limit; i++) {
@@ -495,8 +457,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
               return;
             }
             failed = true;
-            fail(" failed due to " + e);
-            logWriter.error("Exception occured but not failing test ", e); // NOTREACHED
+            throw new AssertionError("failed due to ", e);
           }
         }
 
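Several catch blocks in this file change from logWriter.error(...) plus fail(...) to rethrowing an AssertionError, passing the caught exception as the cause so the original stack trace shows up in the JUnit failure rather than only in the log. A minimal sketch of the two styles (hypothetical helper method, not taken from this commit):

public class AssertionErrorSketch {

  // Old style (as removed above): the cause is only logged; JUnit just sees fail().
  //   catch (InterruptedException e) {
  //     logWriter.error("Exception occurred", e);
  //     fail("failed due to " + e);
  //   }

  // New style: the cause rides along with the AssertionError that fails the test.
  static void sleepOrFail(long millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      throw new AssertionError("failed due to interruption", e);
    }
  }
}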
@@ -545,12 +506,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * to the Max Oplog Size. In such situations , if during switch over , if the
    * Oplog to be rolled is added after function call of obtaining nextDir , a
    * dead lock occurs
-   * 
    */
-
   @Test
-  public void testSingleDirectoryNotHanging()
-  {
+  public void testSingleDirectoryNotHanging() throws Exception {
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     //setting to null will make only one directory
     File dir = new File("testSingleDirectoryNotHanging");
@@ -586,11 +544,8 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     closeDown();
   }
 
-  static volatile boolean putsHaveStarted = false;
-  
   @Test
-  public void testOperationGreaterThanMaxOplogSize()
-  {
+  public void testOperationGreaterThanMaxOplogSize() throws Exception {
     putsHaveStarted = false;
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     diskRegionProperties.setDiskDirs(dirs);
@@ -617,15 +572,13 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     if (puts.exceptionOccurred()) {
       fail(" Exception was not supposed to occur but did occur");
     }
-
   }
   
   /**
    * As we have relaxed the constraint of max dir size 
    */
   @Test
-  public void testOperationGreaterThanMaxDirSize()
-  {
+  public void testOperationGreaterThanMaxDirSize() throws Exception {
     putsHaveStarted = false;
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     diskRegionProperties.setRegionName("IGNORE_EXCEPTION_testOperationGreaterThanMaxDirSize");
@@ -669,7 +622,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * one op per oplog (which is caused by bug 42464).
    */
   @Test
-  public void testBug42464() {
+  public void testBug42464() throws Exception  {
     putsHaveStarted = false;
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     File[] myDirs = new File[] { dirs[0] };
@@ -727,10 +680,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     assertEquals(1, oplogs.size());
   }
 
-  protected volatile boolean exceptionOccured = false;
-
-  class Puts implements Runnable
-  {
+  private static class Puts implements Runnable {
 
     private int dataSize = 1024;
     private Region region;
@@ -753,6 +703,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       return putSuccessful[index];
     }
 
+    @Override
     public void run() {
       performPuts();
     }
@@ -783,9 +734,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testSingleDirectorySizeViolation()
-  {
-
+  public void testSingleDirectorySizeViolation() throws Exception {
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     diskRegionProperties.setRegionName("IGNORE_EXCEPTION_testSingleDirectorySizeViolation");
     //setting to null will make only one directory
@@ -824,8 +773,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * DiskRegDiskAccessExceptionTest : Disk region test for DiskAccessException.
    */
   @Test
-  public void testDiskFullExcep()
-  {
+  public void testDiskFullExcep() throws Exception {
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = (2048 + 500);
     diskDirSize1[1] = (2048 + 500);
@@ -849,14 +797,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
     final byte[] value = new byte[1024];
     Arrays.fill(value, (byte)77);
-    try {
-      for (int i = 0; i < 8; i++) {
-        region.put("" + i, value);
-      }
-    }
-    catch (DiskAccessException e) {
-      logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+
+    for (int i = 0; i < 8; i++) {
+      region.put("" + i, value);
     }
 
     // we should have put 2 values in each dir so the next one should not fit
@@ -883,8 +826,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Make sure if compaction is enabled that we can exceed the disk dir limit
    */
   @Test
-  public void testNoDiskFullExcep()
-  {
+  public void testNoDiskFullExcep() throws Exception {
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = (2048 + 500);
     diskDirSize1[1] = (2048 + 500);
@@ -931,12 +873,12 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
     assertEquals(false, cache.isClosed());
   }
+
   /**
    * DiskRegDiskAccessExceptionTest : Disk region test for DiskAccessException.
    */
   @Test
-  public void testDiskFullExcepOverflowOnly()
-  {
+  public void testDiskFullExcepOverflowOnly() throws Exception {
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = (2048 + 500);
     diskDirSize1[1] = (2048 + 500);
@@ -998,8 +940,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Make sure if compaction is enabled that we can exceed the disk dir limit
    */
   @Test
-  public void testNoDiskFullExcepOverflowOnly()
-  {
+  public void testNoDiskFullExcepOverflowOnly() throws Exception {
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = (2048 + 500);
     diskDirSize1[1] = (2048 + 500);
@@ -1055,8 +996,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * time, the operation should not get stuck or see Exception
    */
   @Test
-  public void testSynchModeAllowOperationToProceedEvenIfDiskSpaceIsNotSufficient()
-  {
+  public void testSynchModeAllowOperationToProceedEvenIfDiskSpaceIsNotSufficient() throws Exception {
     File[] dirs1 = null;
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
@@ -1089,8 +1029,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }// end of testSyncPersistRegionDAExp
 
   @Test
-  public void testAsynchModeAllowOperationToProceedEvenIfDiskSpaceIsNotSufficient()
-  {
+  public void testAsynchModeAllowOperationToProceedEvenIfDiskSpaceIsNotSufficient() throws Exception {
     File[] dirs1 = null;
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
@@ -1154,11 +1093,10 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
                 try {
                   ThreadUtils.join(t1, 60 * 1000);
                 }
-                catch (Exception ignore) {
-                  logWriter.error("Exception occured", ignore);
+                catch (Exception e) {
                   testFailed = true;
                   failureCause = "Test failed as the compactor thread not guaranteed to have not rolled the oplog";
-                  fail("Test failed as the compactor thread not guaranteed to have not rolled the oplog");
+                  throw new AssertionError("Test failed as the compactor thread not guaranteed to have not rolled the oplog", e);
                 }
               }
 
@@ -1201,60 +1139,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   /**
    * DiskRegDiskAttributesTest: This test is for testing Disk attributes set
    * programmatically
-   *  
    */
-
-  DiskRegionProperties diskProps1 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps2 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps3 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps4 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps5 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps6 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps7 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps8 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps9 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps10 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps11 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps12 = new DiskRegionProperties();
-
-  Region region1;
-
-  Region region2;
-
-  Region region3;
-
-  Region region4;
-
-  Region region5;
-
-  Region region6;
-
-  Region region7;
-
-  Region region8;
-
-  Region region9;
-
-  Region region10;
-
-  Region region11;
-
-  Region region12;
-
   @Test
-  public void testDiskRegDWAttrbts()
-  {
+  public void testDiskRegDWAttrbts() throws Exception {
     diskProps1.setDiskDirs(dirs);
     diskProps2.setDiskDirs(dirs);
     diskProps3.setDiskDirs(dirs);
@@ -1373,13 +1260,12 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     lr.getDiskStore().close();
     lr.getGemFireCache().removeDiskStore(lr.getDiskStore());
   }
+
   /**
    * DiskRegGetInvalidEntryTest: get invalid entry should return null.
-   *  
    */
   @Test
-  public void testDiskGetInvalidEntry()
-  {
+  public void testDiskGetInvalidEntry() throws Exception {
     Object getInvalidEnt = "some val";
 
     diskProps.setDiskDirsAndSizes(dirs, diskDirSize);
@@ -1398,17 +1284,14 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       }
     }
     catch (Exception e) {
-      logWriter.error("Exception occured but not expected", e);
-      fail("Failed while put:" + e.toString());
+      throw new AssertionError("Failed while put:", e);
     }
     // invalidate an entry
     try {
       region.invalidate("key1");
     }
     catch (Exception e) {
-
-      fail("Failed while invalidating:" + e.toString());
-
+      throw new AssertionError("Failed while invalidating:" + e.toString());
     }
     // get the invalid entry and verify that the value returned is null
     try {
@@ -1432,8 +1315,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * presented as a byte array
    */
   @Test
-  public void testDiskRegionByteArray()
-  {
+  public void testDiskRegionByteArray() throws Exception {
     Object val = null;
     diskProps.setPersistBackup(true);
     diskProps.setDiskDirs(dirs);
@@ -1483,9 +1365,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * SimpleDiskRegion.
    */
   @Test
-  public void testInstanceOfDiskRegion()
-  {
-
+  public void testInstanceOfDiskRegion() throws Exception {
     DiskRegionProperties diskProps = new DiskRegionProperties();
 
     diskProps.setDiskDirs(dirs); // dirs is an array of four dirs
@@ -1525,11 +1405,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
   /**
    * DiskRegionStatsJUnitTest :
-   *  
    */
   @Test
-  public void testStats()
-  {
+  public void testStats() throws Exception {
     final int overflowCapacity = 100;
     int counter = 0;
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
@@ -1568,11 +1446,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   /**
    * DiskRegOverflowOnlyNoFilesTest: Overflow only mode has no files of previous
    * run, during startup
-   *  
    */
   @Test
-  public void testOverflowOnlyNoFiles()
-  {
+  public void testOverflowOnlyNoFiles() throws Exception {
     diskProps.setTimeInterval(15000l);
     diskProps.setBytesThreshold(100000l);
     diskProps.setOverFlowCapacity(1000);
@@ -1618,7 +1494,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }//end of testOverflowOnlyNoFiles
 
   @Test
-  public void testPersistNoFiles() {
+  public void testPersistNoFiles() throws Exception {
     diskProps.setOverflow(false);
     diskProps.setRolling(false);
     diskProps.setDiskDirs(dirs);
@@ -1666,12 +1542,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Test to verify that DiskAccessException is not thrown if rolling has been enabled. The
    * test configurations will cause the disk to go full and wait for the compactor to release space. 
    * A DiskAccessException should not be thrown by this test
-   * 
-   * @throws Exception
    */
   @Test
-  public void testDiskAccessExceptionNotThrown() throws Exception
-  {
+  public void testDiskAccessExceptionNotThrown() throws Exception {
     File diskDir = new File("dir");
     diskDir.mkdir();
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
@@ -1701,11 +1574,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * If an entry which has just been written on the disk, sees clear just before
    * updating the LRULiist, then that deleted entry should not go into the
    * LRUList
-   * 
    */
   @Test
-  public void testClearInteractionWithLRUList_Bug37605()
-  {
+  public void testClearInteractionWithLRUList_Bug37605() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(true);
     props.setOverFlowCapacity(1);
@@ -1755,11 +1626,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * happened, the entry on which create op is going on was no longer valid, but
    * we would not be able to detect the conflict. The fix was to first clear the
    * region map & then reset the Htree Ref.
-   * 
    */
   @Test
-  public void testClearInteractionWithCreateOperation_Bug37606()
-  {
+  public void testClearInteractionWithCreateOperation_Bug37606() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(false);
     props.setRolling(false);
@@ -1815,8 +1684,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Similar test in case of 'update'
    */
   @Test
-  public void testClearInteractionWithUpdateOperation_Bug37606()
-  {
+  public void testClearInteractionWithUpdateOperation_Bug37606() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setOverflow(false);
     props.setRolling(false);
@@ -1870,44 +1738,37 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     }
   }
   
-
   /**
    * If IOException occurs while updating an entry in a persist only synch mode,
    * DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryUpdateInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSynchPersistOnlyForIOExceptionCase");
-      props.setOverflow(false);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);
-      
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      entryUpdateInSynchPersistTypeForIOExceptionCase(region);
+  public void testEntryUpdateInSynchPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSynchPersistOnlyForIOExceptionCase");
+    props.setOverflow(false);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    entryUpdateInSynchPersistTypeForIOExceptionCase(region);
   }
   
   /**
    * If IOException occurs while updating an entry in a persist overflow synch mode,
    * we should get DiskAccessException & region be destroyed
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryUpdateInSyncOverFlowPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSyncOverFlowPersistOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);     
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      entryUpdateInSynchPersistTypeForIOExceptionCase(region);
+  public void testEntryUpdateInSyncOverFlowPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSyncOverFlowPersistOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    entryUpdateInSynchPersistTypeForIOExceptionCase(region);
   }
   
   /**
@@ -1939,42 +1800,37 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   
   /**
    * If IOException occurs while invalidating an entry in a persist only synch mode,
-   *  DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryInvalidateInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryInvalidateInSynchPersistOnlyForIOExceptionCase");
-      props.setOverflow(false);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);      
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      entryInvalidateInSynchPersistTypeForIOExceptionCase(region);
+  public void testEntryInvalidateInSynchPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryInvalidateInSynchPersistOnlyForIOExceptionCase");
+    props.setOverflow(false);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    entryInvalidateInSynchPersistTypeForIOExceptionCase(region);
   }
   
   /**
    * If IOException occurs while invalidating an entry in a persist overflow synch mode,
-   *  DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryInvalidateInSynchPersistOverflowForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryInvalidateInSynchPersistOverflowForIOExceptionCase");
-      
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      entryInvalidateInSynchPersistTypeForIOExceptionCase(region);
+  public void testEntryInvalidateInSynchPersistOverflowForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryInvalidateInSynchPersistOverflowForIOExceptionCase");
+
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    entryInvalidateInSynchPersistTypeForIOExceptionCase(region);
   }
+
   /**
    * If IOException occurs while invalidating an entry in a persist only synch mode,
    * DiskAccessException should occur & region should be destroyed
@@ -2002,15 +1858,11 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
   
   /**
-   * 
    * If IOException occurs while creating an entry in a persist only synch mode,
-   *  DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryCreateInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {
+  public void testEntryCreateInSynchPersistOnlyForIOExceptionCase() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("IGNORE_EXCEPTION_testEntryCreateInSynchPersistOnlyForIOExceptionCase");
     props.setOverflow(false);
@@ -2022,15 +1874,11 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
   
   /**
-   * 
    * If IOException occurs while creating an entry in a persist overflow synch mode,
    * DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryCreateInSynchPersistOverflowForIOExceptionCase()
-      throws Exception {
+  public void testEntryCreateInSynchPersistOverflowForIOExceptionCase() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("IGNORE_EXCEPTION_testEntryCreateInSynchPersistOverflowForIOExceptionCase");
     props.setOverflow(true);
@@ -2069,13 +1917,10 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   
   /**
    * If IOException occurs while destroying an entry in a persist only synch mode,
-    DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryDestructionInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {
+  public void testEntryDestructionInSynchPersistOnlyForIOExceptionCase() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("IGNORE_EXCEPTION_testEntryDestructionInSynchPersistOnlyForIOExceptionCase");
     props.setOverflow(false);
@@ -2084,18 +1929,14 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     props.setPersistBackup(true); 
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
     entryDestructionInSynchPersistTypeForIOExceptionCase(region);
-    
   }
   
   /**
    * If IOException occurs while destroying an entry in a persist overflow synch mode,
    * DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryDestructionInSynchPersistOverflowForIOExceptionCase()
-      throws Exception {
+  public void testEntryDestructionInSynchPersistOverflowForIOExceptionCase() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("IGNORE_EXCEPTION_testEntryDestructionInSynchPersistOverflowForIOExceptionCase");
     props.setOverflow(true);
@@ -2104,7 +1945,6 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     props.setPersistBackup(true);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
     entryDestructionInSynchPersistTypeForIOExceptionCase(region);
-    
   }
   
   /**
@@ -2134,198 +1974,180 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   
   /**
    * If IOException occurs while updating an entry in an overflow only synch mode,
-   *  DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryUpdateInSynchOverflowOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSynchOverflowOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(false);
-      props.setOverFlowCapacity(1);
-      region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
+  public void testEntryUpdateInSynchOverflowOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInSynchOverflowOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(false);
+    props.setOverFlowCapacity(1);
+    region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
 
-      region.create("key1", "value1");
-      region.create("key2", "value2");
-      ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
-      try {
-        //Update key1, so that key2 goes on disk & encounters an exception
-        region.put("key1", "value1'");
-        fail("Should have encountered DiskAccessException");
-      }
-      catch (DiskAccessException dae) {
-        //OK
-      }        
-      ((LocalRegion) region).getDiskStore().waitForClose();
-      assertTrue(cache.isClosed());
-      region = null;
+    region.create("key1", "value1");
+    region.create("key2", "value2");
+    ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
+    try {
+      //Update key1, so that key2 goes on disk & encounters an exception
+      region.put("key1", "value1'");
+      fail("Should have encountered DiskAccessException");
+    }
+    catch (DiskAccessException dae) {
+      //OK
+    }
+    ((LocalRegion) region).getDiskStore().waitForClose();
+    assertTrue(cache.isClosed());
+    region = null;
   }
   
   /**
    * If IOException occurs while creating an entry in an overflow only synch mode,
    * DiskAccessException should occur & region should be destroyed
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryCreateInSynchOverflowOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryCreateInSynchOverflowOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(false);
-      props.setOverFlowCapacity(1);
-      region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
+  public void testEntryCreateInSynchOverflowOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryCreateInSynchOverflowOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(false);
+    props.setOverFlowCapacity(1);
+    region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
 
-      region.create("key1", "value1");
-      region.create("key2", "value2");
-      ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
-      try {
-        region.create("key3", "value3");
-        fail("Should have encountered DiskAccessException");
-      }
-      catch (DiskAccessException dae) {
-        //OK
-      }       
-      ((LocalRegion) region).getDiskStore().waitForClose();
-      assertTrue(cache.isClosed());
-      region = null;
+    region.create("key1", "value1");
+    region.create("key2", "value2");
+    ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
+    try {
+      region.create("key3", "value3");
+      fail("Should have encountered DiskAccessException");
+    }
+    catch (DiskAccessException dae) {
+      //OK
+    }
+    ((LocalRegion) region).getDiskStore().waitForClose();
+    assertTrue(cache.isClosed());
+    region = null;
   }
   
   /**
    * A deletion of an entry in overflow only mode should not cause
    * any eviction & hence no DiskAccessException should occur
-   * 
-   * @throws Exception
    */
   @Test
-  public void testEntryDeletionInSynchOverflowOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(false);
-      props.setOverFlowCapacity(1);
-      region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
+  public void testEntryDeletionInSynchOverflowOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(false);
+    props.setOverFlowCapacity(1);
+    region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props);
 
-      region.create("key1", "value1");
-      region.create("key2", "value2");
-      region.create("key3", "value3");
-      ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
-      try {
-        //Update key1, so that key2 goes on disk & encounters an exception
-        region.destroy("key1");
-        region.destroy("key3");          
-      }
-      catch (DiskAccessException dae) {
-        fail("Should not have encountered DiskAccessException");
-      }        
+    region.create("key1", "value1");
+    region.create("key2", "value2");
+    region.create("key3", "value3");
+    ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
+    try {
+      // a destroy in overflow-only mode should not cause eviction, hence no DiskAccessException
+      region.destroy("key1");
+      region.destroy("key3");
+    }
+    catch (DiskAccessException dae) {
+      fail("Should not have encountered DiskAccessException");
+    }
   }
   
   /**
    * If IOException occurs while updating an entry in an asynch mode,
-   *  DiskAccessException should occur & region should be destroyed
-   *
-   * 
-   * @throws Exception
+   * DiskAccessException should occur & region should be destroyed
    */
   @Test
-  public void testEntryUpdateInASynchPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInASynchPersistOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(false);
-      props.setBytesThreshold(48);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);
-      region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, props);
-      // Get the oplog handle & hence the underlying file & close it
-      UninterruptibleFileChannel oplogFileChannel = ((LocalRegion)region).getDiskRegion()
-          .testHook_getChild().getFileChannel();
-      oplogFileChannel.close();
-      
-      region.create("key1", new byte[16]);
-      region.create("key2", new byte[16]);       
+  public void testEntryUpdateInASynchPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testEntryUpdateInASynchPersistOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(false);
+    props.setBytesThreshold(48);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+    region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, props);
+    // Get the oplog handle & hence the underlying file & close it
+    UninterruptibleFileChannel oplogFileChannel = ((LocalRegion)region).getDiskRegion()
+        .testHook_getChild().getFileChannel();
+    oplogFileChannel.close();
 
-      DiskRegion dr = ((LocalRegion)region).getDiskRegion();
-      dr.flushForTesting();
-      //Join till the asynch writer terminates
-      if (!dr.testWaitForAsyncFlusherThread(2000)) {
-        fail("async flusher thread did not terminate");
+    region.create("key1", new byte[16]);
+    region.create("key2", new byte[16]);
+
+    DiskRegion dr = ((LocalRegion)region).getDiskRegion();
+    dr.flushForTesting();
+    //Join till the asynch writer terminates
+    if (!dr.testWaitForAsyncFlusherThread(2000)) {
+      fail("async flusher thread did not terminate");
+    }
+
+    Wait.waitForCriterion(new WaitCriterion() {
+      @Override
+      public boolean done() {
+        return cache.isClosed();
       }
 
-      Wait.waitForCriterion(new WaitCriterion() {
-        @Override
-        public boolean done() {
-          return cache.isClosed();
-        }
+      @Override
+      public String description() {
+        return "Waiting for region IGNORE_EXCEPTION_testEntryUpdateInASynchPersistOnlyForIOExceptionCase to be destroyed.";
+      }
+    }, 5000, 500, true);
 
-        @Override
-        public String description() {
-          return "Waiting for region IGNORE_EXCEPTION_testEntryUpdateInASynchPersistOnlyForIOExceptionCase to be destroyed.";
-        }                
-      }, 5000, 500, true);
-      
-      ((LocalRegion) region).getDiskStore().waitForClose();
-      assertTrue(cache.isClosed());
-      region = null;
+    ((LocalRegion) region).getDiskStore().waitForClose();
+    assertTrue(cache.isClosed());
+    region = null;
   }
   
   /**
    * If IOException occurs while updating an entry in an already initialized
    * DiskRegion, then the bridge servers should not be stopped, if any are running,
    * as there are no clients connected to them.
-   * 
-   * @throws Exception
    */
   @Test
-  public void testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(true);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);
-  
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      CacheServer bs1 = cache.addCacheServer();
-      bs1.setPort(5555);
-      bs1.start();
+  public void testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(true);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
 
-      region.create("key1", new byte[16]);
-      region.create("key2", new byte[16]);
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    CacheServer bs1 = cache.addCacheServer();
+    bs1.setPort(5555);
+    bs1.start();
+
+    region.create("key1", new byte[16]);
+    region.create("key2", new byte[16]);
 //      Get the oplog handle & hence the underlying file & close it
-      UninterruptibleFileChannel oplogFileChannel = ((LocalRegion)region).getDiskRegion()
-          .testHook_getChild().getFileChannel();         
-      oplogFileChannel.close();
-      try {
-        region.put("key2", new byte[16]);
-      }catch(DiskAccessException dae) {
-        //OK expected
-      }
-      ((LocalRegion) region).getDiskStore().waitForClose();
-      assertTrue(cache.isClosed());        
-      region = null;
-      List bsRunning = cache.getCacheServers();
-      // [anil & bruce] the following assertion was changed to true because
-      // a disk access exception in a server should always stop the server
-      assertTrue(bsRunning.isEmpty());
+    UninterruptibleFileChannel oplogFileChannel = ((LocalRegion)region).getDiskRegion()
+        .testHook_getChild().getFileChannel();
+    oplogFileChannel.close();
+    try {
+      region.put("key2", new byte[16]);
+    } catch (DiskAccessException dae) {
+      //OK expected
+    }
+    ((LocalRegion) region).getDiskStore().waitForClose();
+    assertTrue(cache.isClosed());
+    region = null;
+    List bsRunning = cache.getCacheServers();
+    // [anil & bruce] the following assertion was changed to true because
+    // a disk access exception in a server should always stop the server
+    assertTrue(bsRunning.isEmpty());
   }
   
   @Test
-  public void testDummyByteBugDuringRegionClose_Bug40250()
-      throws Exception
-  {
+  public void testDummyByteBugDuringRegionClose_Bug40250() throws Exception {
     try {
       // Create a region with rolling enabled.
       DiskRegionProperties props = new DiskRegionProperties();
@@ -2395,63 +2217,58 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
       CacheObserverHolder.setInstance(new CacheObserverAdapter());
     }
-
   }
    
   /**
    * If IOException occurs while initializing a region,
    * then the bridge servers should not be stopped.
-   * 
-   * @throws Exception
    */
   @Test
-  public void testBridgeServerRunningInSynchPersistOnlyForIOExceptionCase()
-      throws Exception {    
-      DiskRegionProperties props = new DiskRegionProperties();
-      props.setRegionName("IGNORE_EXCEPTION_testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase");
-      props.setOverflow(true);
-      props.setRolling(true);
-      props.setDiskDirs(dirs);
-      props.setPersistBackup(true);
-      props.setMaxOplogSize(100000); // just needs to be bigger than 65550
+  public void testBridgeServerRunningInSynchPersistOnlyForIOExceptionCase() throws Exception {
+    DiskRegionProperties props = new DiskRegionProperties();
+    props.setRegionName("IGNORE_EXCEPTION_testBridgeServerStoppingInSynchPersistOnlyForIOExceptionCase");
+    props.setOverflow(true);
+    props.setRolling(true);
+    props.setDiskDirs(dirs);
+    props.setPersistBackup(true);
+    props.setMaxOplogSize(100000); // just needs to be bigger than 65550
 
-      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-      CacheServer bs1 = cache.addCacheServer();
-      bs1.setPort(5555);
-      bs1.start();      
-
-      region.create("key1", new byte[16]);
-      region.create("key2", new byte[16]);
-      //Get the oplog file path
-      UninterruptibleFileChannel oplogFileChnl = ((LocalRegion)region).getDiskRegion()
-      .testHook_getChild().getFileChannel();
-      //corrupt the opfile
-      oplogFileChnl.position(2);
-      ByteBuffer bf = ByteBuffer.allocate(416);
-      for(int i = 0; i <5;++i) {
-        bf.putInt(i);
-      }
-      bf.flip();
+    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+    CacheServer bs1 = cache.addCacheServer();
+    bs1.setPort(5555);
+    bs1.start();
+
+    region.create("key1", new byte[16]);
+    region.create("key2", new byte[16]);
+    //Get the oplog file path
+    UninterruptibleFileChannel oplogFileChnl = ((LocalRegion)region).getDiskRegion()
+        .testHook_getChild().getFileChannel();
+    //corrupt the opfile
+    oplogFileChnl.position(2);
+    ByteBuffer bf = ByteBuffer.allocate(416);
+    for (int i = 0; i < 5; ++i) {
+      bf.putInt(i);
+    }
+    bf.flip();
 //      Corrupt the oplogFile
-      oplogFileChnl.write(bf);
-      //Close the region
-      region.close();        
-      assertTrue(region.isDestroyed());        
-      try {
-        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
-        fail("expected DiskAccessException");
-      }catch(DiskAccessException dae) {
-        //OK expected          
-      }
-      assertTrue(region.isDestroyed());        
-      region = null;
-      List bsRunning = cache.getCacheServers();
-      assertTrue(!bsRunning.isEmpty());
+    oplogFileChnl.write(bf);
+    //Close the region
+    region.close();
+    assertTrue(region.isDestroyed());
+    try {
+      region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
+      fail("expected DiskAccessException");
+    } catch (DiskAccessException dae) {
+      //OK expected
+    }
+    assertTrue(region.isDestroyed());
+    region = null;
+    List bsRunning = cache.getCacheServers();
+    assertFalse(bsRunning.isEmpty());
   }
 
   @Test
-  public void testEarlyTerminationOfCompactorByDefault()
-      throws Exception {
+  public void testEarlyTerminationOfCompactorByDefault() throws Exception {
     try {
       // Create a region with rolling enabled.
       DiskRegionProperties props = new DiskRegionProperties();
@@ -2601,11 +2418,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
       CacheObserverHolder.setInstance(new CacheObserverAdapter());
     }    
   }
-  
-  
+
   @Test
-  public void testAssertionErrorIfMissingOplog()
-      throws Exception {
+  public void testAssertionErrorIfMissingOplog() throws Exception {
     try {
       // Create a region with rolling enabled.
       DiskRegionProperties props = new DiskRegionProperties();
@@ -2669,8 +2484,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testNoTerminationOfCompactorTillRollingCompleted()
-      throws Exception {
+  public void testNoTerminationOfCompactorTillRollingCompleted() throws Exception {
     try {
       // Create a region with rolling enabled.
       System.getProperties().setProperty(DiskStoreImpl.COMPLETE_COMPACTION_BEFORE_TERMINATION_PROPERTY_NAME, "true");
@@ -2874,7 +2688,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testBug40648part1() {
+  public void testBug40648part1() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testBug40648part1");
     props.setRolling(true);
@@ -2898,7 +2712,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testBug40648part2() {
+  public void testBug40648part2() throws Exception {
     // Same as part1 but no persistence. I wasn't able to get part2
     // to fail but thought this was worth testing anyway.
     DiskRegionProperties props = new DiskRegionProperties();
@@ -2924,7 +2738,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testForceCompactionDoesRoll() {
+  public void testForceCompactionDoesRoll() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testForceCompactionDoesRoll");
     props.setRolling(false);
@@ -2955,7 +2769,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Confirm that forceCompaction waits for the compaction to finish
    */
   @Test
-  public void testNonDefaultCompaction() {
+  public void testNonDefaultCompaction() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testForceCompactionDoesRoll");
     props.setRolling(false);
@@ -2985,7 +2799,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
    * Confirm that forceCompaction waits for the compaction to finish
    */
   @Test
-  public void testForceCompactionIsSync() {
+  public void testForceCompactionIsSync() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testForceCompactionDoesRoll");
     props.setRolling(false);
@@ -3011,8 +2825,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testBug40876() throws Exception
-  {
+  public void testBug40876() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
 
     props.setRegionName("testBug40876");
@@ -3027,15 +2840,13 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     Object obj =((LocalRegion)this.region).getValueOnDiskOrBuffer("key1");
     assertEquals(Token.INVALID,obj);
     assertFalse(this.region.containsValueForKey("key1"));
-    
-     
   }
 
   /**
    * Make sure oplog created by recovery goes in the proper directory
    */
   @Test
-  public void testBug41822() {
+  public void testBug41822() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testBug41822");
     props.setRolling(false);
@@ -3104,7 +2915,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testBug41770() throws InterruptedException {
+  public void testBug41770() throws Exception {
     DiskRegionProperties props = new DiskRegionProperties();
     props.setRegionName("testBug41770");
     props.setOverflow(false);

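For reference, the polling idiom used by the asynch-persist test above (Wait.waitForCriterion with an anonymous WaitCriterion) looks like this in isolation. This is only a sketch: it assumes the same cache field and the com.gemstone.gemfire.test.dunit.Wait/WaitCriterion classes used in the hunks, and the timeout and interval values are arbitrary rather than taken from this commit.

    // Poll until the condition holds; the final boolean asks the wait to fail the test on timeout.
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return cache.isClosed();   // condition the test is waiting for
      }

      @Override
      public String description() {
        return "waiting for the cache to close after the disk access failure";
      }
    }, 10 * 1000, 200, true);      // timeout ms, polling interval ms, throwOnTimeout

Because the wait fails on timeout, the tests above can assert cache.isClosed() immediately after the call returns.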
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
index 75eb9db..0019270 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
@@ -20,9 +20,7 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -36,7 +34,6 @@ import org.junit.Rule;
 import org.junit.rules.TestName;
 
 import com.gemstone.gemfire.LogWriter;
-import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.CacheTransactionManager;
@@ -54,33 +51,30 @@ import com.gemstone.gemfire.internal.cache.versions.VersionTag;
  * all tests are present here.
  * 
  * @since 5.1
- *
  */
-public class DiskRegionTestingBase
-{
-  @Rule public TestName name = new TestName();
-  
-   boolean testFailed = false;
-   String failureCause = "";
-  protected static Cache cache = null;
+public abstract class DiskRegionTestingBase {
 
+  protected static final boolean debug = false;
+
+  protected static Cache cache = null;
   protected static DistributedSystem ds = null;
   protected static Properties props = new Properties();
-
   protected static File[] dirs = null;
-
   protected static int[] diskDirSize = null;
 
-  protected Region region = null;
-  
-  protected static final boolean debug = false;
-
+  protected Region region;
   protected LogWriter logWriter;
 
+  boolean testFailed;
+  String failureCause = "";
+
+  @Rule
+  public TestName name = new TestName();
 
   @Before
-  public void setUp() throws Exception
-  {
+  public final void setUp() throws Exception {
+    preSetUp();
+
     props.setProperty("mcast-port", "0");
     props.setProperty("locators", "");
     props.setProperty("log-level", "config"); // to keep diskPerf logs smaller
@@ -121,14 +115,20 @@ public class DiskRegionTestingBase
     deleteFiles();
 
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
+
+    postSetUp();
+  }
+
+  protected void preSetUp() throws Exception {
+  }
+
+  protected void postSetUp() throws Exception {
   }
 
   @After
-  public void tearDown() throws Exception
-  {
-    /*if (cache != null && !cache.isClosed()) {
-      cache.close();
-    }*/
+  public final void tearDown() throws Exception {
+    preTearDown();
+
     try {
       if (cache != null && !cache.isClosed()) {
         for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
@@ -144,13 +144,6 @@ public class DiskRegionTestingBase
           catch (RegionDestroyedException e) {
             // ignore
           }
-          catch (VirtualMachineError e) {
-            SystemFailure.initiateFailure(e);
-            throw e;
-          }
-          catch (Throwable t) {
-            logWriter.error(t);
-          }
         }
       }
       
@@ -159,22 +152,20 @@ public class DiskRegionTestingBase
       }
     }
     finally {
-      try {
-        closeCache();
-      }
-      catch (VirtualMachineError e) {
-        SystemFailure.initiateFailure(e);
-        throw e;
-      }
-      catch (Throwable t) {
-        logWriter.error("Error in closing the cache ", t);
-        
-      }
+      closeCache();
     }
     ds.disconnect();
     //Asif : below is not needed but leave it
     deleteFiles();
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
+
+    postTearDown();
+  }
+
+  protected void preTearDown() throws Exception {
+  }
+
+  protected void postTearDown() throws Exception {
   }
 
   protected Cache createCache() {
@@ -210,8 +201,7 @@ public class DiskRegionTestingBase
   /**
    * cleans all the directory of all the files present in them
    */
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     closeDiskStores();
     for (int i = 0; i < dirs.length; i++) {
       System.out.println("trying to delete files in " + dirs[i].getAbsolutePath());
@@ -238,7 +228,6 @@ public class DiskRegionTestingBase
         }
       }
     }
-
   }
 
   protected static void closeDiskStores() {
@@ -251,34 +240,30 @@ public class DiskRegionTestingBase
    * clears and closes the region
    *  
    */
-
-  protected void closeDown()
-  {
-    try{
-      if(!region.isDestroyed()) {
+  protected void closeDown() {
+    try {
+      if (!region.isDestroyed()) {
         region.destroyRegion();
       }
-    }catch(Exception e) {
+    } catch (Exception e) {
       this.logWriter.error("DiskRegionTestingBase::closeDown: Exception in destroying the region", e);
     }
   }
 
   /**
    * puts 100 integers into the region
-   *  
    */
-  protected void put100Int()
-  {
+  protected void put100Int() {
     for (int i = 0; i < 100; i++) {
       region.put(new Integer(i), new Integer(i));
     }
   }
+
   protected void verify100Int() {
     verify100Int(true);
   }
   
-  protected void verify100Int(boolean verifySize)
-  {
+  protected void verify100Int(boolean verifySize) {
     if (verifySize) {
       assertEquals(100,region.size());
     }
@@ -291,31 +276,26 @@ public class DiskRegionTestingBase
 
   /**
    * will keep on putting till region overflows
-   *  
    */
-  protected void putTillOverFlow(Region region)
-  {
+  protected void putTillOverFlow(Region region) {
     int i = 0;
     for (i = 0; i < 1010; i++) {
       region.put(new Integer(i + 200), new Integer(i + 200));
     }
   }
 
-  /*
+  /**
    * put an entry
-   *  
    */
-  protected void putForValidation(Region region)
-  {
+  protected void putForValidation(Region region) {
     final byte[] value = new byte[1024];
     region.put("testKey", value);
   }
 
-  /*
+  /**
    * get val from disk
    */
-  protected void validatePut(Region region)
-  {
+  protected void validatePut(Region region) {
     // flush data to disk
     ((LocalRegion)region).getDiskRegion().flushForTesting();
     try {
@@ -324,9 +304,7 @@ public class DiskRegionTestingBase
     catch (Exception ex) {
       ex.printStackTrace();
       fail("Failed to get the value on disk");
-
     }
-
   }
   
   protected HashMap<String, VersionTag> saveVersionTags(LocalRegion region) {
@@ -353,17 +331,15 @@ public class DiskRegionTestingBase
     }
   }
 
-  /** Since these are not visible to cache.diskPerf we add wrapper methods to
+  /**
+   * Since these are not visible to cache.diskPerf we add wrapper methods to
    * make the following parameters visible
-   *
    */
-  public static void setCacheObserverCallBack()
-  {
+  public static void setCacheObserverCallBack() {
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
   }
 
-  public static void unSetCacheObserverCallBack()
-  {
+  public static void unSetCacheObserverCallBack() {
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
   }
 

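The reworked base class above makes setUp() and tearDown() final and routes per-test customization through the preSetUp()/postSetUp() and preTearDown()/postTearDown() hooks. A minimal sketch of a subclass using those hooks follows; the subclass name and the field it manages are hypothetical, while the hook signatures, DiskRegionProperties and the inherited dirs field come from the diff above.

    public class MyDiskRegionTest extends DiskRegionTestingBase { // hypothetical subclass

      private DiskRegionProperties diskProps; // illustrative per-test state

      @Override
      protected void postSetUp() throws Exception {
        // runs after the base class has connected, created the cache and wiped the disk dirs
        diskProps = new DiskRegionProperties();
        diskProps.setDiskDirs(dirs);
      }

      @Override
      protected void preTearDown() throws Exception {
        // runs before the base class destroys the root regions and closes the cache
        diskProps = null;
      }
    }

Since the base methods are final, a subclass can no longer hide the shared setup/teardown sequence by overriding setUp() or tearDown() directly.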

[7/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
Change AssertionFailedError to AssertionError and general cleanup.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bb91dedc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bb91dedc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bb91dedc

Branch: refs/heads/feature/GEODE-837
Commit: bb91dedc474aad8b7dee7d08df7cdf299973c4bd
Parents: 243a5c7
Author: Kirk Lund <kl...@apache.org>
Authored: Wed Jun 1 21:55:23 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Jun 1 21:55:23 2016 -0700

----------------------------------------------------------------------
 .../query/functional/NestedQueryJUnitTest.java  |   88 +-
 .../gemfire/cache30/CacheXml30DUnitTest.java    |   25 +-
 .../cache30/ClientServerCCEDUnitTest.java       |   43 +-
 .../DistributedNoAckRegionDUnitTest.java        |    2 +-
 .../cache30/GlobalRegionOffHeapDUnitTest.java   |   25 +-
 .../gemfire/cache30/MultiVMRegionTestCase.java  | 1148 ++++++++----------
 .../cache30/RolePerformanceDUnitTest.java       |   18 +-
 .../gemfire/cache30/TXDistributedDUnitTest.java |   27 +-
 .../DistributedLockServiceDUnitTest.java        |   89 +-
 .../gemfire/internal/SSLConfigJUnitTest.java    |   58 +-
 .../Bug34179TooManyFilesOpenJUnitTest.java      |   77 +-
 .../cache/ComplexDiskRegionJUnitTest.java       |   34 +-
 ...entFlushingAndRegionOperationsJUnitTest.java |   38 +-
 .../cache/ConcurrentMapOpsDUnitTest.java        |   55 +-
 .../ConcurrentRegionOperationsJUnitTest.java    |   10 +-
 ...rentRollingAndRegionOperationsJUnitTest.java |   28 +-
 .../internal/cache/ConflationJUnitTest.java     |  159 +--
 ...iskRandomOperationsAndRecoveryJUnitTest.java |   38 +-
 .../DiskRegCachexmlGeneratorJUnitTest.java      |  121 +-
 .../internal/cache/DiskRegCbkChkJUnitTest.java  |   38 +-
 .../DiskRegOplogSwtchingAndRollerJUnitTest.java |  130 +-
 .../cache/DiskRegRecoveryJUnitTest.java         |  215 ++--
 .../cache/DiskRegionAsyncRecoveryJUnitTest.java |   14 +-
 ...RegionChangingRegionAttributesJUnitTest.java |   51 +-
 .../internal/cache/DiskRegionJUnitTest.java     |  927 ++++++--------
 .../internal/cache/DiskRegionTestingBase.java   |  126 +-
 .../internal/cache/FaultingInJUnitTest.java     |   77 +-
 .../MultipleOplogsRollingFeatureJUnitTest.java  |   41 +-
 .../gemfire/internal/cache/OplogJUnitTest.java  |    8 +-
 .../cache/SimpleDiskRegionJUnitTest.java        |   16 +-
 ...skRegOverflowAsyncGetInMemPerfJUnitTest.java |   40 +-
 ...iskRegOverflowAsyncJUnitPerformanceTest.java |   55 +-
 ...lowSyncGetInMemPerfJUnitPerformanceTest.java |   53 +-
 ...DiskRegOverflowSyncJUnitPerformanceTest.java |   65 +-
 ...egionOverflowAsyncRollingOpLogJUnitTest.java |   13 +-
 ...RegionOverflowSyncRollingOpLogJUnitTest.java |   14 +-
 .../DiskRegionPerfJUnitPerformanceTest.java     |   78 +-
 .../DiskRegionPersistOnlySyncJUnitTest.java     |   34 +-
 ...DiskRegionRollOpLogJUnitPerformanceTest.java |   90 +-
 .../cache/ha/HARegionQueueDUnitTest.java        |  458 +++----
 .../tier/sockets/InterestListDUnitTest.java     |  530 ++++----
 .../management/ClientHealthStatsDUnitTest.java  |   89 +-
 .../gemfire/management/ManagementTestBase.java  |   50 +-
 .../gemfire/pdx/VersionClassLoader.java         |   98 --
 .../internal/LuceneEventListenerJUnitTest.java  |    7 +-
 45 files changed, 2050 insertions(+), 3350 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/NestedQueryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/NestedQueryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/NestedQueryJUnitTest.java
index 11dcb0e..c7ed9d7 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/NestedQueryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/NestedQueryJUnitTest.java
@@ -22,23 +22,20 @@
  */
 package com.gemstone.gemfire.cache.query.functional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Set;
 
-import junit.framework.AssertionFailedError;
-
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.ErrorCollector;
 
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.CacheUtils;
@@ -53,29 +50,28 @@ import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache.query.internal.StructImpl;
 import com.gemstone.gemfire.cache.query.types.ObjectType;
-//import com.gemstone.gemfire.internal.util.DebuggerSupport;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
-/**
- *
- */
 @Category(IntegrationTest.class)
 public class NestedQueryJUnitTest {
-  ObjectType resType1=null;
-  ObjectType resType2= null;
 
-  int resSize1=0;
-  int resSize2=0;
+  private ObjectType resType1=null;
+  private ObjectType resType2= null;
+
+  private int resSize1=0;
+  private int resSize2=0;
 
-  Iterator itert1=null;
-  Iterator itert2=null;
+  private Iterator itert1=null;
+  private Iterator itert2=null;
 
-  Set set1=null;
-  Set set2=null;
+  private Set set1=null;
+  private Set set2=null;
 
-  String s1;
-  String s2;
+  private String s1;
+  private String s2;
 
+  @Rule
+  public ErrorCollector errorCollector = new ErrorCollector(); // used by testQueries
 
   @Before
   public void setUp() throws java.lang.Exception {
@@ -90,7 +86,9 @@ public class NestedQueryJUnitTest {
     CacheUtils.closeCache();
   }
 
-  public void atestQueries() throws Exception{
+  @Ignore("TODO: this test was disabled")
+  @Test
+  public void testQueries2() throws Exception {
     String queryString;
     Query query;
     Object result;
@@ -119,14 +117,14 @@ public class NestedQueryJUnitTest {
     query = CacheUtils.getQueryService().newQuery(queryString);
     result = query.execute();
     CacheUtils.log(Utils.printResult(result));
-
   }
+
   @Test
   public void testQueries() throws Exception {
     String queries[]={
         "SELECT DISTINCT * FROM /Portfolios WHERE NOT(SELECT DISTINCT * FROM positions.values p WHERE p.secId = 'IBM').isEmpty",
         "SELECT DISTINCT * FROM /Portfolios where NOT(SELECT DISTINCT * FROM /Portfolios p where p.ID = 0).isEmpty",
-        "SELECT DISTINCT * FROM /Portfolios where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p where ID = 0).status",
+        "SELECT DISTINCT * FROM /Portfolios where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p where p.ID = 0).status",
         "SELECT DISTINCT * FROM /Portfolios where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p where p.ID = 0).status",
         "SELECT DISTINCT * FROM /Portfolios x where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p where x.ID = p.ID).status",
         "SELECT DISTINCT * FROM /Portfolios x where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p where p.ID = x.ID).status",
@@ -136,19 +134,14 @@ public class NestedQueryJUnitTest {
       try{
         Query query = CacheUtils.getQueryService().newQuery(queries[i]);
         query.execute();
-        //CacheUtils.log(Utils.printResult(result));
-        CacheUtils.log("OK "+queries[i]);
       }catch(Exception e){
-        CacheUtils.log("FAILED "+queries[i]);
-        CacheUtils.log(e.getMessage());
-        //e.printStackTrace();
+        errorCollector.addError(e);
       }
     }
   }
+
   @Test
   public void testNestedQueriesEvaluation() throws Exception {
-
-
     QueryService qs;
     qs = CacheUtils.getQueryService();
     String queries[] = {
@@ -187,8 +180,7 @@ public class NestedQueryJUnitTest {
         // Iterator iter=set1.iterator();
 
       } catch (Exception e) {
-        e.printStackTrace();
-        fail(q.getQueryString());
+        throw new AssertionError(q.getQueryString(), e);
       }
     }
 
@@ -220,8 +212,7 @@ public class NestedQueryJUnitTest {
         set2=((r[i][1]).asSet());
 
       } catch (Exception e) {
-        e.printStackTrace();
-        fail(q.getQueryString());
+        throw new AssertionError(q.getQueryString(), e);
       }
     }
     for(int j=0;j<=1;j++){
@@ -246,12 +237,10 @@ public class NestedQueryJUnitTest {
         fail("FAILED: In both the Cases the members of ResultsSet are different.");
     }
     CacheUtils.compareResultsOfWithAndWithoutIndex(r, this);
-
   }
 
   @Test
-  public void testNestedQueriesResultsasStructSet() throws Exception {
-
+  public void testNestedQueriesResultsAsStructSet() throws Exception {
     QueryService qs;
     qs = CacheUtils.getQueryService();
     String queries[] = {
@@ -309,9 +298,7 @@ public class NestedQueryJUnitTest {
         // Iterator iter=set1.iterator();
 
       } catch (Exception e) {
-        AssertionFailedError afe = new AssertionFailedError(q.getQueryString());
-        afe.initCause(e);
-        throw afe;
+        throw new AssertionError(q.getQueryString(), e);
       }
     }
 
@@ -336,8 +323,7 @@ public class NestedQueryJUnitTest {
         set2=((r[i][1]).asSet());
 
       } catch (Exception e) {
-        e.printStackTrace();
-        fail(q.getQueryString());
+        throw new AssertionError(q.getQueryString(), e);
       }
     }
     for(int j=0;j<queries.length;j++){
@@ -436,7 +422,6 @@ public class NestedQueryJUnitTest {
    * Tests a nested query with shorts converted to integer types in the result 
    * set of the inner query.  The short field in the outer query should be 
    * evaluated against the integer types and match.
-   * @throws Exception
    */
   @Test
   public void testNestedQueryWithShortTypesFromInnerQuery() throws Exception {
@@ -456,8 +441,6 @@ public class NestedQueryJUnitTest {
   /**
    * Tests a nested query that has duplicate results in the inner query
    * Results should not be duplicated in the final result set
-   * 
-   * @throws Exception
    */
   @Test
   public void testNestedQueryWithMultipleMatchingResultsWithIn() throws Exception {
@@ -474,10 +457,8 @@ public class NestedQueryJUnitTest {
     helpTestIndexForQuery("<trace>SELECT * FROM /portfolios1 p where p.ID in (SELECT p.ID FROM /portfolios1 p WHERE p.ID = 1)", "p.ID", "/portfolios1 p");
   }
 
-  /*
+  /**
    * helper method to test against a compact range index
-   * @param query
-   * @throws Exception
    */
   private void helpTestIndexForQuery(String query, String indexedExpression, String regionPath) throws Exception {
     QueryService qs = CacheUtils.getQueryService();
@@ -492,14 +473,17 @@ public class NestedQueryJUnitTest {
     assertTrue(observer.isIndexesUsed);
   }
 
-  class QueryObserverImpl extends QueryObserverAdapter{
-    boolean isIndexesUsed = false;
-    ArrayList indexesUsed = new ArrayList();
+  private static class QueryObserverImpl extends QueryObserverAdapter {
+
+    private boolean isIndexesUsed = false;
+    private ArrayList indexesUsed = new ArrayList();
 
+    @Override
     public void beforeIndexLookup(Index index, int oper, Object key) {
       indexesUsed.add(index.getName());
     }
 
+    @Override
     public void afterIndexLookup(Collection results) {
       if(results != null){
         isIndexesUsed = true;

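The conversion this commit is named for shows up in the catch blocks above: the JUnit 3 pattern of building a junit.framework.AssertionFailedError and attaching the cause with initCause() is replaced by the two-argument java.lang.AssertionError constructor (available since Java 7). The helper below is a sketch of that change, not code from the commit; the method name is made up, and Query/q/e stand in for the variables used in the tests.

    // Wrap a query failure so both the query text and the root cause reach the JUnit report.
    private static AssertionError queryFailure(Query q, Exception e) {
      // old idiom removed by this commit:
      //   AssertionFailedError afe = new AssertionFailedError(q.getQueryString());
      //   afe.initCause(e);
      //   throw afe;
      return new AssertionError(q.getQueryString(), e);
    }

In the tests the same construction is written inline in each catch block; the new form carries the identical information while dropping the dependency on the junit.framework package.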
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
index feafcef..9961746 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
@@ -16,30 +16,18 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileWriter;
 import java.io.FilenameFilter;
 import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.util.Properties;
 
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import junit.framework.AssertionFailedError;
-
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 import org.xml.sax.SAXException;
 
-import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.CacheListener;
@@ -57,21 +45,18 @@ import com.gemstone.gemfire.cache.LoaderHelper;
 import com.gemstone.gemfire.cache.MirrorType;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.util.ObjectSizer;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.DiskWriteAttributesImpl;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlParser;
-import com.gemstone.gemfire.internal.cache.xmlcache.ClientCacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 import com.gemstone.gemfire.util.test.TestUtil;
 
 /**
@@ -614,7 +599,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
     File dir = null;
     try {
       dir = findFile(dirName);
-    } catch(AssertionFailedError e) {
+    } catch(AssertionError e) {
       //ignore, no directory.
     }
     if (dir != null && dir.exists()) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
index 1829454..3a00ff4 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
@@ -15,26 +15,19 @@
  * limitations under the License.
  */
 package com.gemstone.gemfire.cache30;
-
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
+import static org.junit.Assert.*;
 
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.Collection;
-import java.util.HashMap;
 
-import junit.framework.AssertionFailedError;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.DataPolicy;
@@ -48,8 +41,6 @@ import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.RegionEntry;
-import com.gemstone.gemfire.internal.cache.TombstoneService;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
@@ -62,13 +53,15 @@ import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * concurrency-control tests for client/server
  * 
  *
  */
-@Category(DistributedTest.class)
+@Category(DistributedTest.class)
 public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
   public static LocalRegion TestRegion;
   
@@ -87,17 +80,17 @@ public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
     super();
   }
 
-  @Test
+  @Test
   public void testClientServerRRTombstoneGC() {
     clientServerTombstoneGCTest(getUniqueName(), true);
   }
   
-  @Test
+  @Test
   public void testClientServerPRTombstoneGC() {
     clientServerTombstoneGCTest(getUniqueName(), false);
   }
   
-  @Test
+  @Test
   public void testPutAllInNonCCEClient() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -144,12 +137,12 @@ public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
    * registerInterest() to protect the client cache from stray putAll
    * events sitting in backup queues on the server 
    */
-  @Test
+  @Test
   public void testClientRIGetsTombstonesRR() throws Exception {
     clientRIGetsTombstoneTest(getUniqueName(),true);
   }
   
-  @Test
+  @Test
   public void testClientRIGetsTombstonesPR() throws Exception {
     clientRIGetsTombstoneTest(getUniqueName(),false);
   }
@@ -202,12 +195,12 @@ public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
     ensureAllTombstonesPresent(vm2);
   }
   
-  @Test
+  @Test
   public void testClientRIGetsInvalidEntriesRR() throws Exception {
     clientRIGetsInvalidEntriesTest(getUniqueName(),true);
   }
   
-  @Test
+  @Test
   public void testClientRIGetsInvalidEntriesPR() throws Exception {
     clientRIGetsInvalidEntriesTest(getUniqueName(),false);
   }
@@ -373,12 +366,12 @@ public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
 
   //  private void closeCache(VM vm) {
 
-  @Test
+  @Test
   public void testClientServerRRQueueCleanup() {  // see bug #50879 if this fails
     clientServerTombstoneMessageTest(true);
   }
   
-  @Test
+  @Test
   public void testClientServerPRQueueCleanup() {  // see bug #50879 if this fails
     clientServerTombstoneMessageTest(false);
   }
@@ -569,7 +562,7 @@ public class ClientServerCCEDUnitTest extends JUnit4CacheTestCase {
         if (TestRegion.getTombstoneCount() == 0) {
           LogWriterUtils.getLogWriter().warning("region has no tombstones");
 //          TestRegion.dumpBackingMap();
-          throw new AssertionFailedError("expected to find tombstones but region is empty");
+          throw new AssertionError("expected to find tombstones but region is empty");
         }
         return null;
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
index 578c257..5f8b0c0 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
@@ -294,7 +294,7 @@ public class DistributedNoAckRegionDUnitTest extends MultiVMRegionTestCase {
 
   /**
    * The number of milliseconds to try repeating validation code in the
-   * event that AssertionFailedError is thrown.  For DISTRIBUTED_NO_ACK 
+   * event that AssertionError is thrown.  For DISTRIBUTED_NO_ACK
    * scopes, a repeat timeout is used to account for the fact that a
    * previous operation may have not yet completed.
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
index c225c04..05ebb44 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
@@ -16,17 +16,11 @@
  */
 package com.gemstone.gemfire.cache30;
 
+import java.util.Properties;
+
 import org.junit.Ignore;
-import org.junit.experimental.categories.Category;
 import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
-import java.util.Properties;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
@@ -34,20 +28,17 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
 import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * Tests Global Region with OffHeap memory.
  * 
  * @since 9.0
  */
-@SuppressWarnings({ "deprecation", "serial" })
 @Category(DistributedTest.class)
+@SuppressWarnings({ "deprecation", "serial", "rawtypes", "unchecked" })
 public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
 
-  public GlobalRegionOffHeapDUnitTest() {
-    super();
-  }
-  
   @Override
   public final void preTearDownAssertions() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
@@ -64,9 +55,9 @@ public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
   }
 
   @Override
-  @Ignore("DISABLED - bug 47951")
+  @Ignore("TODO: DISABLED due to bug 47951")
   @Test
-  public void testNBRegionInvalidationDuringGetInitialImage() throws Throwable {
+  public void testNBRegionInvalidationDuringGetInitialImage() throws Exception {
     //DISABLED - bug 47951
   }
 
@@ -77,7 +68,6 @@ public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
     return props;
   }
   
-  @SuppressWarnings({ "rawtypes", "unchecked" })
   @Override
   protected RegionAttributes getRegionAttributes() {
     RegionAttributes attrs = super.getRegionAttributes();
@@ -86,7 +76,6 @@ public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
     return factory.create();
   }
   
-  @SuppressWarnings({ "rawtypes", "unchecked" })
   @Override
   protected RegionAttributes getRegionAttributes(String type) {
     RegionAttributes ra = super.getRegionAttributes(type);
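
For reference, a hedged sketch of how the reordered annotations above combine in JUnit 4 (the class name is made up): the class-level @Category tags every test in the class as a DistributedTest, while @Ignore on the overridden method keeps it reported as skipped and carries the bug number in its reason string.

    import org.junit.Ignore;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import com.gemstone.gemfire.test.junit.categories.DistributedTest;

    @Category(DistributedTest.class)
    public class ExampleOffHeapDUnitTest extends GlobalRegionDUnitTest {

      @Override
      @Ignore("TODO: DISABLED due to bug 47951")
      @Test
      public void testNBRegionInvalidationDuringGetInitialImage() throws Exception {
        // intentionally a no-op while bug 47951 is open
      }
    }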


[2/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
index f13e46e..ff199d0 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
@@ -16,22 +16,16 @@
  */
 package com.gemstone.gemfire.internal.cache.tier.sockets;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import junit.framework.AssertionFailedError;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
@@ -45,10 +39,14 @@ import com.gemstone.gemfire.cache.InterestRegistrationEvent;
 import com.gemstone.gemfire.cache.InterestRegistrationListener;
 import com.gemstone.gemfire.cache.InterestResultPolicy;
 import com.gemstone.gemfire.cache.LoaderHelper;
+import com.gemstone.gemfire.cache.NoSubscriptionServersAvailableException;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionShortcut;
 import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.client.PoolFactory;
+import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedMember;
@@ -57,7 +55,6 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.Assert;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
@@ -66,8 +63,8 @@ import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
-import com.gemstone.gemfire.cache.client.*;
-import com.gemstone.gemfire.cache.NoSubscriptionServersAvailableException;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * Test Scenario :
@@ -104,57 +101,68 @@ import com.gemstone.gemfire.cache.NoSubscriptionServersAvailableException;
  * then both clients connect to the server
  * c1 register(k1,k2,k3) and c2 register (k4,k5)
  * then verify that updates have occurred as a result of interest registration.
- *
- *
- *
  */
 @Category(DistributedTest.class)
-public class InterestListDUnitTest extends JUnit4DistributedTestCase
-{
-  static Cache cache = null;
-
-  VM vm0 = null;
+public class InterestListDUnitTest extends JUnit4DistributedTestCase {
 
-  VM vm1 = null;
+  private static final String REGION_NAME = "InterestListDUnitTest_region";
 
-  VM vm2 = null;
-
-  /** the server cache's port number */
-  int PORT1;
+  // using an Integer instead of String to make sure ALL_KEYS works on non-String keys
+  private final static Integer key1 = new Integer(1);
+  private final static Integer key2 = new Integer(2);
+  private final static String key1_originalValue = "key-1-orig-value";
+  private final static String key2_originalValue = "key-2-orig-value";
 
-  // using a Integer instead of String to make sure ALL_KEYS works
-  // on non-String keys
-  final static Integer key1 = new Integer(1);
-  final static Integer key2 = new Integer(2);
-  final static String key1_originalValue = "key-1-orig-value";
-  final static String key2_originalValue = "key-2-orig-value";
-
-  static final String REGION_NAME = "InterestListDUnitTest_region";
+  private static Cache cache = null;
 
   /** some tests use this to hold the server for invoke() access */
-  static CacheServer server;
+  private static CacheServer server;
 
   /** interestListener listens in cache server vms */
-  static InterestListener interestListener;
+  private static InterestListener interestListener;
 
-  /** constructor */
-  public InterestListDUnitTest() {
-    super();
-  }
+  private VM vm0 = null;
+  private VM vm1 = null;
+  private VM vm2 = null;
+
+  /** the server cache's port number */
+  private int PORT1;
 
   @Override
   public final void postSetUp() throws Exception {
     disconnectAllFromDS();
-    Wait.pause(10000);
+
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
     vm2 = host.getVM(2);
+
     // start servers first
-    PORT1 =  ((Integer) vm0.invoke(() -> InterestListDUnitTest.createServerCache())).intValue();
+    PORT1 = vm0.invoke(() -> InterestListDUnitTest.createServerCache());
   }
 
-/**
+  @Override
+  public final void preTearDown() throws Exception {
+    // close the clients first
+    vm1.invoke(() -> InterestListDUnitTest.closeCache());
+    vm2.invoke(() -> InterestListDUnitTest.closeCache());
+    // then close the servers
+    vm0.invoke(() -> InterestListDUnitTest.closeCache());
+
+    cache = null;
+    server = null;
+    interestListener = null;
+
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
+      public void run() {
+        cache = null;
+        server = null;
+        interestListener = null;
+      }
+    });
+  }
+
+  /**
    * one server two clients
    * create Entries in all the vms
    * c1 : register (k1)
@@ -169,69 +177,64 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
    * c2 : validate (k1 == vm2-k1) AND (k2 == vm2-k2) // as both are not registered
    * c2 : put (k1 -> vm2-k1-again) AND (k2 -> vm2-k2-again)
    * c1 : validate (k1 == vm1-k1-again) AND (k2 == vm1-k2-again)// as both are not registered
-   *
    */
   @Test
-  public void testInterestListRegistration()
-    {
-
-      vm1.invoke(() -> InterestListDUnitTest.createClientCache(
-        NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)));
-      vm2.invoke(() -> InterestListDUnitTest.createClientCache(
-        NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)));
-
-      vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
-      vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
-
-      vm1.invoke(() -> InterestListDUnitTest.registerKey( key1 ));
-      vm2.invoke(() -> InterestListDUnitTest.registerKey( key2 ));
-
-      vm1.invoke(() -> InterestListDUnitTest.put( "vm1" ));
-      Wait.pause(10000);
-      vm2.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm2" ));
-      vm2.invoke(() -> InterestListDUnitTest.put( "vm2" ));
-      Wait.pause(10000);
-      vm1.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm1" ));
-
-      vm1.invoke(() -> InterestListDUnitTest.unregisterKey( key1 ));
-      vm2.invoke(() -> InterestListDUnitTest.unregisterKey( key2 ));
-
-      vm1.invoke(() -> InterestListDUnitTest.putAgain( "vm1" ));
-      Wait.pause(10000);
-      vm2.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm2" ));
-      vm2.invoke(() -> InterestListDUnitTest.putAgain( "vm2" ));
-      Wait.pause(10000);
-      vm1.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm1" ));
-    }
+  public void testInterestListRegistration() throws Exception {
+    vm1.invoke(() -> InterestListDUnitTest.createClientCache(
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)));
+    vm2.invoke(() -> InterestListDUnitTest.createClientCache(
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)));
 
-/**
- * one server two clients
- * create Entries in all the vms
- *
- * STEP 1:
- * c2:  put (k2 -> vm-k2)
- * c1:  validate k2 == k2 (not updated because no interest)
- *
- * STEP 2
- * c1:  register k2
- * c1 : validate k2 == vm-k2 (updated because of registerInterest)
- * c1:  validate k1 == k1 (other key not updated because still no interest)
- *
- * STEP 3:
- * c1:  put (k1 -> vm-k1)
- * c2:  validate k1 == k1 (not updated because no interest)
- * c2:  register k1
- * c2:  validate k1 == vm-k1 (updated because of registerInterest)
- *
- * STEP 4:
- * c2:  unregister k1
- * c1:  put k1->k1 (old value)
- * c2:  validate k1 == vm-k1 (no interest, so missing update)
- */
-  @Test
-  public void testValueRefresh()
-  {
+    vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+    vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+
+    vm1.invoke(() -> InterestListDUnitTest.registerKey( key1 ));
+    vm2.invoke(() -> InterestListDUnitTest.registerKey( key2 ));
+
+    vm1.invoke(() -> InterestListDUnitTest.put( "vm1" ));
+    Wait.pause(10000);
+    vm2.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.put( "vm2" ));
+    Wait.pause(10000);
+    vm1.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm1" ));
+
+    vm1.invoke(() -> InterestListDUnitTest.unregisterKey( key1 ));
+    vm2.invoke(() -> InterestListDUnitTest.unregisterKey( key2 ));
 
+    vm1.invoke(() -> InterestListDUnitTest.putAgain( "vm1" ));
+    Wait.pause(10000);
+    vm2.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.putAgain( "vm2" ));
+    Wait.pause(10000);
+    vm1.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm1" ));
+  }
+
+  /**
+   * one server two clients
+   * create Entries in all the vms
+   *
+   * STEP 1:
+   * c2:  put (k2 -> vm-k2)
+   * c1:  validate k2 == k2 (not updated because no interest)
+   *
+   * STEP 2
+   * c1:  register k2
+   * c1 : validate k2 == vm-k2 (updated because of registerInterest)
+   * c1:  validate k1 == k1 (other key not updated because still no interest)
+   *
+   * STEP 3:
+   * c1:  put (k1 -> vm-k1)
+   * c2:  validate k1 == k1 (not updated because no interest)
+   * c2:  register k1
+   * c2:  validate k1 == vm-k1 (updated because of registerInterest)
+   *
+   * STEP 4:
+   * c2:  unregister k1
+   * c1:  put k1->k1 (old value)
+   * c2:  validate k1 == vm-k1 (no interest, so missing update)
+   */
+  @Test
+  public void testValueRefresh() throws Exception {
     // Initialization
     Host host = Host.getHost(0);
     vm1.invoke(() -> InterestListDUnitTest.createClientCache( NetworkUtils.getServerHostName(host), new Integer(PORT1)));
@@ -269,16 +272,13 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     vm2.invoke(() -> InterestListDUnitTest.validateSingleEntry(key1, "vm1")); // update lost
   }
 
-/**
- * one server two clients
- * create Entries in all the vms
- * register ALL_KEYS and verifies that updates are receiving to all the keys
- *
- */
+  /**
+   * one server two clients
+   * create Entries in all the vms
+   * register ALL_KEYS and verify that updates are received for all the keys
+   */
   @Test
-  public void testInterestListRegistration_ALL_KEYS()
-  {
-
+  public void testInterestListRegistration_ALL_KEYS() throws Exception {
     vm1.invoke(() -> InterestListDUnitTest.createClientCache(
       NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)));
     vm2.invoke(() -> InterestListDUnitTest.createClientCache(
@@ -292,20 +292,18 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     vm1.invoke(() -> InterestListDUnitTest.put_ALL_KEYS());
     Wait.pause(10000);
     vm2.invoke(() -> InterestListDUnitTest.validate_ALL_KEYS());
-
   }
- /**
-  * one server two clients
-  * create Entries in all the vms
-  * server directly puts some values
-  * then both clients connect to the server
-  * c1 register(k1,k2,k3) and c2 register (k4,k5)
-  * then verify that updates has occured as a result of interest registration.
-  *
-  */
+
+  /**
+   * one server two clients
+   * create Entries in all the vms
+   * server directly puts some values
+   * then both clients connect to the server
+   * c1 register(k1,k2,k3) and c2 register (k4,k5)
+   * then verify that updates have occurred as a result of interest registration.
+   */
   @Test
-  public void testInitializationOfRegionFromInterestList()
-  {
+  public void testInitializationOfRegionFromInterestList() throws Exception {
     // directly put on server
     vm0.invoke(() -> InterestListDUnitTest.multiple_put());
     Wait.pause(1000);
@@ -322,7 +320,6 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     // verify the values for registered keys
     vm1.invoke(() -> InterestListDUnitTest.validateRegionEntriesFromInterestListInVm1());
     vm2.invoke(() -> InterestListDUnitTest.validateRegionEntriesFromInterestListInVm2());
-
   }
 
   /**
@@ -340,129 +337,124 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
    * c2 : validate (k1 == vm2-k1) AND (k2 == vm2-k2) // as both are not registered
    * c2 : put (k1 -> vm2-k1-again) AND (k2 -> vm2-k2-again)
    * c1 : validate (k1 == vm1-k1-again) AND (k2 == vm1-k2-again)// as both are not registered
-   *
    */
   @Test
-  public void testInterestListRegistrationOnServer()
-    {
-
-      DistributedMember c1 = (DistributedMember)vm1
-        .invoke(() -> InterestListDUnitTest.createClientCache(
-          NetworkUtils.getServerHostName(vm0.getHost()), PORT1));
-      DistributedMember c2 = (DistributedMember)vm2
-        .invoke(() -> InterestListDUnitTest.createClientCache(
-          NetworkUtils.getServerHostName(vm0.getHost()), PORT1));
+  public void testInterestListRegistrationOnServer() throws Exception {
+    DistributedMember c1 = (DistributedMember)vm1
+      .invoke(() -> InterestListDUnitTest.createClientCache(
+        NetworkUtils.getServerHostName(vm0.getHost()), PORT1));
+    DistributedMember c2 = (DistributedMember)vm2
+      .invoke(() -> InterestListDUnitTest.createClientCache(
+        NetworkUtils.getServerHostName(vm0.getHost()), PORT1));
 
-      vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
-      vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+    vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+    vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
 
-      vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c1, key1 ));
-      vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c2, key2 ));
+    vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c1, key1 ));
+    vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c2, key2 ));
 
-      vm0.invoke(() -> InterestListDUnitTest.flushQueues());
+    vm0.invoke(() -> InterestListDUnitTest.flushQueues());
 
-      vm1.invoke(() -> InterestListDUnitTest.put( "vm1" ));
+    vm1.invoke(() -> InterestListDUnitTest.put( "vm1" ));
 
-      vm0.invoke(() -> InterestListDUnitTest.flushQueues());
+    vm0.invoke(() -> InterestListDUnitTest.flushQueues());
 
-      vm2.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm2" ));
-      vm2.invoke(() -> InterestListDUnitTest.put( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.put( "vm2" ));
 
-      vm0.invoke(() -> InterestListDUnitTest.flushQueues());
+    vm0.invoke(() -> InterestListDUnitTest.flushQueues());
 
-      vm1.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm1" ));
+    vm1.invoke(() -> InterestListDUnitTest.validateEntriesK1andK2( "vm1" ));
 
-      vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c1, key1 ));
-      vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c2, key2 ));
+    vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c1, key1 ));
+    vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c2, key2 ));
 
-      vm1.invoke(() -> InterestListDUnitTest.putAgain( "vm1" ));
+    vm1.invoke(() -> InterestListDUnitTest.putAgain( "vm1" ));
 
-      vm0.invoke(() -> InterestListDUnitTest.flushQueues());
+    vm0.invoke(() -> InterestListDUnitTest.flushQueues());
 
-      vm2.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm2" ));
-      vm2.invoke(() -> InterestListDUnitTest.putAgain( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm2" ));
+    vm2.invoke(() -> InterestListDUnitTest.putAgain( "vm2" ));
 
-      vm0.invoke(() -> InterestListDUnitTest.flushQueues());
+    vm0.invoke(() -> InterestListDUnitTest.flushQueues());
 
-      vm1.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm1" ));
-    }
+    vm1.invoke(() -> InterestListDUnitTest.validateEntriesAgain( "vm1" ));
+  }
 
-    /**
-     * two servers one client
-     * create Entries in all the vms
-     * register interest in various ways and ensure that registration listeners
-     * are properly invoked
-     * @throws Exception 
-     */
+  /**
+   * two servers one client
+   * create Entries in all the vms
+   * register interest in various ways and ensure that registration listeners
+   * are properly invoked
+   */
   @Test
   public void testInterestRegistrationListeners() throws Exception {
-      int port2;
+    int port2;
 
-      createCache();
-      server = addCacheServer();
-      port2 = server.getPort();
+    createCache();
+    server = addCacheServer();
+    port2 = server.getPort();
 
-      addRegisterInterestListener();
-      vm0.invoke(() -> InterestListDUnitTest.addRegisterInterestListener());
+    addRegisterInterestListener();
+    vm0.invoke(() -> InterestListDUnitTest.addRegisterInterestListener());
 
-      // servers are set up, now do the clients
-      DistributedMember c1 = (DistributedMember)vm1
-      .invoke(() -> InterestListDUnitTest.createClientCache(
-        NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2));
-      DistributedMember c2 = (DistributedMember)vm2
-      .invoke(() -> InterestListDUnitTest.createClientCache(
-        NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2));
+    // servers are set up, now do the clients
+    DistributedMember c1 = (DistributedMember)vm1
+    .invoke(() -> InterestListDUnitTest.createClientCache(
+      NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2));
+    DistributedMember c2 = (DistributedMember)vm2
+    .invoke(() -> InterestListDUnitTest.createClientCache(
+      NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2));
 
-      vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
-      vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+    vm1.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
+    vm2.invoke(() -> InterestListDUnitTest.createEntriesK1andK2());
 
-      // interest registration from clients should cause listeners to be invoked
-      // in both servers
-      LogWriterUtils.getLogWriter().info("test phase 1");
-      vm1.invoke(() -> InterestListDUnitTest.registerKey( key1 ));
-      vm2.invoke(() -> InterestListDUnitTest.registerKey( key2 ));
+    // interest registration from clients should cause listeners to be invoked
+    // in both servers
+    LogWriterUtils.getLogWriter().info("test phase 1");
+    vm1.invoke(() -> InterestListDUnitTest.registerKey( key1 ));
+    vm2.invoke(() -> InterestListDUnitTest.registerKey( key2 ));
 
-      Integer zero = new Integer(0);
-      Integer two = new Integer(2);
+    Integer zero = new Integer(0);
+    Integer two = new Integer(2);
 
-      interestListener.verifyCountsAndClear(2, 0);
-      vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( two, zero ));
+    interestListener.verifyCountsAndClear(2, 0);
+    vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( two, zero ));
 
-      // unregistration from clients should invoke listeners on both servers
-      LogWriterUtils.getLogWriter().info("test phase 2");
-      vm1.invoke(() -> InterestListDUnitTest.unregisterKey( key1 ));
-      vm2.invoke(() -> InterestListDUnitTest.unregisterKey( key2 ));
+    // unregistration from clients should invoke listeners on both servers
+    LogWriterUtils.getLogWriter().info("test phase 2");
+    vm1.invoke(() -> InterestListDUnitTest.unregisterKey( key1 ));
+    vm2.invoke(() -> InterestListDUnitTest.unregisterKey( key2 ));
 
-      interestListener.verifyCountsAndClear(0, 2);
-      vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( zero, two ));
+    interestListener.verifyCountsAndClear(0, 2);
+    vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( zero, two ));
 
-      // now the primary server for eache client will register and unregister
-      LogWriterUtils.getLogWriter().info("test phase 3");
-      registerKeyForClient(c1, key1);
-      vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c1, key1 ));
-      registerKeyForClient(c2, key2);
-      vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c2, key2 ));
+    // now the primary server for each client will register and unregister
+    LogWriterUtils.getLogWriter().info("test phase 3");
+    registerKeyForClient(c1, key1);
+    vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c1, key1 ));
+    registerKeyForClient(c2, key2);
+    vm0.invoke(() -> InterestListDUnitTest.registerKeyForClient( c2, key2 ));
 
-      interestListener.verifyCountsAndClear(2, 0);
-      vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( two, zero ));
+    interestListener.verifyCountsAndClear(2, 0);
+    vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( two, zero ));
 
-      LogWriterUtils.getLogWriter().info("test phase 4");
-      unregisterKeyForClient(c1, key1);
-      vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c1, key1 ));
-      unregisterKeyForClient(c2, key2);
-      vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c2, key2 ));
+    LogWriterUtils.getLogWriter().info("test phase 4");
+    unregisterKeyForClient(c1, key1);
+    vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c1, key1 ));
+    unregisterKeyForClient(c2, key2);
+    vm0.invoke(() -> InterestListDUnitTest.unregisterKeyForClient( c2, key2 ));
 
-      interestListener.verifyCountsAndClear(0, 2);
-      vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( zero, two ));
-    }
+    interestListener.verifyCountsAndClear(0, 2);
+    vm0.invoke(() -> InterestListDUnitTest.verifyCountsAndClear( zero, two ));
+  }
 
   /**
    * This tests whether an exception is thrown in register/unregister when no
    * server is available.
    */
   @Test
-  public void testNoAvailableServer() {
-
+  public void testNoAvailableServer() throws Exception {
     // Register interest in key1.
     vm1.invoke(() -> InterestListDUnitTest.createClientCache( NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1) ));
     vm1.invoke(() -> InterestListDUnitTest.registerKey( key1 ));
@@ -483,7 +475,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
   }
 
   @Test
-  public void testRegisterInterestOnPartitionedRegionWithCacheLoader() {
+  public void testRegisterInterestOnPartitionedRegionWithCacheLoader() throws Exception {
     runRegisterInterestWithCacheLoaderTest(false);
   }
 
@@ -524,20 +516,17 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     vm1.invoke(() -> InterestListDUnitTest.confirmNoCacheListenerInvalidates());
   }
   
-  private  void createCache(Properties props) throws Exception
-  {
+  private  void createCache(Properties props) throws Exception {
     DistributedSystem ds = getSystem(props);
     cache = CacheFactory.create(ds);
     assertNotNull(cache);
   }
 
-  public static DistributedMember createClientCache(String host, int port) throws Exception {
+  private static DistributedMember createClientCache(String host, int port) throws Exception {
     return createClientCache(host, port, 0);
   }
 
-  public static DistributedMember createClientCache(String host,
-      int port, int port2) throws Exception
-  {
+  private static DistributedMember createClientCache(String host, int port, int port2) throws Exception {
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -607,25 +596,27 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
   // this method is for use in vm0 where the CacheServer used by
   // most of these tests resides.  This server is held in the
   // static variable 'server'
-  public static Integer createServerCache() throws Exception {
+  private static Integer createServerCache() throws Exception {
     return createServerCache(true);
   }
 
-  public static Integer createServerCache(boolean addReplicatedRegion) throws Exception {
+  private static Integer createServerCache(boolean addReplicatedRegion) throws Exception {
     createCache(addReplicatedRegion);
     server = addCacheServer();
     return new Integer(server.getPort());
   }
 
   /** wait for queues to drain in the server */
-  public static void flushQueues() throws Exception {
+  private static void flushQueues() throws Exception {
     CacheServerImpl impl = (CacheServerImpl)server;
     for (CacheClientProxy proxy: (Set<CacheClientProxy>)impl.getAllClientSessions()) {
       final CacheClientProxy fproxy = proxy;
       WaitCriterion ev = new WaitCriterion() {
+        @Override
         public boolean done() {
           return fproxy.getHARegionQueue().size() == 0;
         }
+        @Override
         public String description() {
           return "waiting for queues to drain for " + fproxy.getProxyID();
         }
@@ -634,7 +625,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void addRegisterInterestListener() {
+  private static void addRegisterInterestListener() {
     interestListener = new InterestListener();
     List<CacheServer> servers = cache.getCacheServers();
     for (CacheServer s: servers) {
@@ -669,9 +660,11 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     final EventCountingCacheListener fCacheListener = (EventCountingCacheListener) region.getAttributes().getCacheListener();
   
     WaitCriterion ev = new WaitCriterion() {
+      @Override
       public boolean done() {
         return fCacheListener.hasReceivedAllCreateEvents();
       }
+      @Override
       public String description() {
         return "waiting for " + fCacheListener.getExpectedCreates() + " create events";
       }
@@ -695,12 +688,11 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     assertEquals(0/*expected*/, cacheListener.getInvalidates()/*actual*/);
   }
 
-  public static void verifyCountsAndClear(int count1, int count2) {
+  private static void verifyCountsAndClear(int count1, int count2) {
     interestListener.verifyCountsAndClear(count1, count2);
   }
 
-  public static void createEntriesK1andK2()
-  {
+  private static void createEntriesK1andK2() {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -720,14 +712,12 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
   }
 
   private static void registerKeyOnly(Object key) {
-
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     r.registerInterest(key);
   }
 
-  public static void registerKey(Object key)
-  {
+  private static void registerKey(Object key) {
     try {
       registerKeyOnly(key);
     }
@@ -742,7 +732,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
    * @param clientId the DM of the client
    * @param key the key that the client is interested in
    */
-  public static void registerKeyForClient(DistributedMember clientId, Object key) {
+  private static void registerKeyForClient(DistributedMember clientId, Object key) {
     try {
       ClientSession cs = server.getClientSession(clientId);
       if (cs.isPrimary()) {
@@ -754,8 +744,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void registerKeyEx(Object key) {
-
+  private static void registerKeyEx(Object key) {
     try {
       registerKeyOnly(key);
       fail("Expected an exception during register interest with no available servers.");
@@ -766,8 +755,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void registerALL_KEYS()
-  {
+  private static void registerALL_KEYS() {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -778,8 +766,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void put_ALL_KEYS()
-  {
+  private static void put_ALL_KEYS() {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -794,8 +781,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void validate_ALL_KEYS()
-  {
+  private static void validate_ALL_KEYS() {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -807,8 +793,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void registerKeys()
-  {
+  private static void registerKeys() {
     List list = new ArrayList();
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
@@ -822,8 +807,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void registerKeysAgain()
-  {
+  private static void registerKeysAgain() {
     List list = new ArrayList();
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
@@ -838,14 +822,12 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
   }
 
   private static void unregisterKeyOnly(Object key) {
-
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     r.unregisterInterest(key);
   }
 
-  public static void unregisterKey(Object key)
-  {
+  private static void unregisterKey(Object key) {
     try {
       unregisterKeyOnly(key);
     }
@@ -859,8 +841,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
    * @param clientId the client's ID
   * @param key the key it's no longer interested in
    */
-  public static void unregisterKeyForClient(DistributedMember clientId, Object key)
-  {
+  private static void unregisterKeyForClient(DistributedMember clientId, Object key) {
     try {
       ClientSession cs = server.getClientSession(clientId);
       if (cs.isPrimary()) {
@@ -872,12 +853,11 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void unregisterKeyEx(Object key) {
+  private static void unregisterKeyEx(Object key) {
     unregisterKeyOnly(key);
   }
 
-  public static void validateRegionEntriesFromInterestListInVm1()
-  {
+  private static void validateRegionEntriesFromInterestListInVm1() {
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     Region.Entry k1, k2;
@@ -889,8 +869,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     assertEquals(k2.getValue(), "server2");
   }
 
-  public static void validateRegionEntriesFromInterestListInVm2()
-  {
+  private static void validateRegionEntriesFromInterestListInVm2() {
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     assertEquals(r.getEntry("k3").getValue(), "server3");
@@ -898,7 +877,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     assertEquals(r.getEntry("k5").getValue(), "server5");
   }
 
-  public static void putSingleEntry(Object key, String value) {
+  private static void putSingleEntry(Object key, String value) {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -911,8 +890,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void put(String vm)
-  {
+  private static void put(String vm) {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -937,8 +915,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void multiple_put()
-  {
+  private static void multiple_put() {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -954,8 +931,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void putAgain(String vm)
-  {
+  private static void putAgain(String vm) {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -980,9 +956,9 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void validateEntriesK1andK2(final String vm)
-  {
+  private static void validateEntriesK1andK2(final String vm) {
     WaitCriterion ev = new WaitCriterion() {
+      @Override
       public boolean done() {
         try {
           Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
@@ -1001,10 +977,11 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
           }
           return true;
         }
-        catch (AssertionFailedError ex) {
+        catch (AssertionError ex) {
           return false;
         }
       }
+      @Override
       public String description() {
         return "waiting for client to apply events from server";
       }
@@ -1012,7 +989,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     Wait.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
   }
 
-  public static void validateSingleEntry(Object key, String value) {
+  private static void validateSingleEntry(Object key, String value) {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertEquals(value, r.getEntry(key).getValue());
@@ -1022,8 +999,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  public static void validateEntriesAgain(String vm)
-  {
+  private static void validateEntriesAgain(String vm) {
     try {
       Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
       assertNotNull(r);
@@ -1044,40 +1020,25 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
     }
   }
 
-  @Override
-  public final void preTearDown() throws Exception {
-    // close the clients first
-    vm1.invoke(() -> InterestListDUnitTest.closeCache());
-    vm2.invoke(() -> InterestListDUnitTest.closeCache());
-    // then close the servers
-    vm0.invoke(() -> InterestListDUnitTest.closeCache());
-    cache = null;
-    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
-  }
-
-  public static void closeCache()
-  {
+  private static void closeCache() {
     if (cache != null && !cache.isClosed()) {
       cache.close();
       cache.getDistributedSystem().disconnect();
     }
   }
 
-  static class InterestListener implements InterestRegistrationListener {
+  private static class InterestListener implements InterestRegistrationListener {
+
     private int registrationCount;
     private int unregistrationCount;
 
-    /* (non-Javadoc)
-     * @see com.gemstone.gemfire.cache.InterestRegistrationListener#afterRegisterInterest(com.gemstone.gemfire.cache.InterestRegistrationEvent)
-     */
+    @Override
     public void afterRegisterInterest(InterestRegistrationEvent event) {
       LogWriterUtils.getLogWriter().info("InterestListener.afterRegisterInterest invoked with this event: " + event);
       registrationCount++;
     }
 
-    /* (non-Javadoc)
-     * @see com.gemstone.gemfire.cache.InterestRegistrationListener#afterUnregisterInterest(com.gemstone.gemfire.cache.InterestRegistrationEvent)
-     */
+    @Override
     public void afterUnregisterInterest(InterestRegistrationEvent event) {
       LogWriterUtils.getLogWriter().info("InterestListener.afterUnregisterInterest invoked with this event: " + event);
       unregistrationCount++;
@@ -1096,9 +1057,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
       unregistrationCount = 0;
     }
 
-    /* (non-Javadoc)
-     * @see com.gemstone.gemfire.cache.CacheCallback#close()
-     */
+    @Override
     public void close() {
     }
 
@@ -1109,9 +1068,7 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
   private static class EventCountingCacheListener extends CacheListenerAdapter {
 
     private AtomicInteger creates = new AtomicInteger();
-
     private AtomicInteger updates = new AtomicInteger();
-
     private AtomicInteger invalidates = new AtomicInteger();
 
     private int expectedCreates;
@@ -1124,15 +1081,18 @@ public class InterestListDUnitTest extends JUnit4DistributedTestCase
       return this.expectedCreates;
     }
 
+    @Override
     public void afterCreate(EntryEvent event) {
       incrementCreates();
     }
 
+    @Override
     public void afterUpdate(EntryEvent event) {
       incrementUpdates();
       event.getRegion().getCache().getLogger().warning("Received update event " + getUpdates() + " for " + event.getKey());
     }
 
+    @Override
     public void afterInvalidate(EntryEvent event) {
       incrementInvalidates();
       event.getRegion().getCache().getLogger().warning("Received invalidate event " + getInvalidates() + " for " + event.getKey());
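
The anonymous WaitCriterion blocks above (now carrying @Override) follow the standard dunit polling idiom: done() is polled until it returns true or the timeout expires, and description() labels the failure if it never does. A hedged sketch of the idiom with illustrative timeout values; proxy stands for the final CacheClientProxy that flushQueues() captures.

    // hedged sketch: poll every 200 ms, fail the test if still not drained after 50 s
    WaitCriterion queueDrained = new WaitCriterion() {
      @Override
      public boolean done() {
        return proxy.getHARegionQueue().size() == 0;
      }
      @Override
      public String description() {
        return "waiting for queues to drain for " + proxy.getProxyID();
      }
    };
    Wait.waitForCriterion(queueDrained, 50 * 1000, 200, true);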

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
index c940869..6661a0a 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
@@ -16,19 +16,15 @@
  */
 package com.gemstone.gemfire.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+import static com.gemstone.gemfire.test.dunit.Assert.*;
 
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Properties;
 
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Region;
@@ -45,41 +41,31 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.IgnoredException;
-import com.gemstone.gemfire.test.dunit.RMIException;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
-
-import junit.framework.AssertionFailedError;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * Client health stats check
- * 
- * 
  */
 @Category(DistributedTest.class)
+@SuppressWarnings("serial")
 public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
 
   private static final String k1 = "k1";
-
   private static final String k2 = "k2";
-
   private static final String client_k1 = "client-k1";
-
   private static final String client_k2 = "client-k2";
 
   /** name of the test region */
   private static final String REGION_NAME = "ClientHealthStatsDUnitTest_Region";
 
-  private VM server = null;
-
   private static VM client = null;
-  
   private static VM client2 = null;
-
   private static VM managingNode = null;
 
   private static ManagementTestBase helper = new ManagementTestBase(){};
@@ -91,9 +77,7 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
   
   private static GemFireCacheImpl cache = null;
 
-  public ClientHealthStatsDUnitTest() {
-    super();
-  }
+  private VM server = null;
 
   @Override
   public final void postSetUp() throws Exception {
@@ -104,6 +88,7 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     server = host.getVM(1);
     client = host.getVM(2);
     client2 = host.getVM(3);
+
     IgnoredException.addIgnoredException("Connection reset");
   }
 
@@ -117,19 +102,16 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
 
     disconnectAllFromDS();
   }
-  
-  public static void reset() throws Exception {
+
+  private static void reset() throws Exception {
     lastKeyReceived = false;
     numOfCreates = 0;
     numOfUpdates = 0;
     numOfInvalidates = 0;
   }
 
-  private static final long serialVersionUID = 1L;
-
   @Test
   public void testClientHealthStats_SubscriptionEnabled() throws Exception {
-
     helper.createManagementCache(managingNode);
     helper.startManagingNode(managingNode);
 
@@ -150,7 +132,6 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
   
   @Test
   public void testClientHealthStats_SubscriptionDisabled() throws Exception {
-
     helper.createManagementCache(managingNode);
     helper.startManagingNode(managingNode);
 
@@ -171,7 +152,6 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
   
   @Test
   public void testClientHealthStats_DurableClient() throws Exception {
-
     helper.createManagementCache(managingNode);
     helper.startManagingNode(managingNode);
 
@@ -238,15 +218,14 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
       
       @Override
       public String description() {
-        // TODO Auto-generated method stub
         return "Proxy has not paused yet";
       }
     };
     
     Wait.waitForCriterion(criterion, 15 * 1000, 200, true);	  
   }
-  
-  public static int createServerCache() throws Exception {
+
+  private static int createServerCache() throws Exception {
     Cache cache = helper.createCache(false);
 
     RegionFactory<String, String> rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
@@ -259,22 +238,16 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     return server1.getPort();
   }
 
-  
-  
-  public static void closeClientCache() throws Exception {
+  private static void closeClientCache() throws Exception {
     cache.close(true);
   }
 
-  public static void createClientCache(Host host, Integer port, int clientNum, boolean subscriptionEnabled, boolean durable) throws Exception {
-
+  private static void createClientCache(Host host, Integer port, int clientNum, boolean subscriptionEnabled, boolean durable) throws Exception {
     Properties props = new Properties();
     props.setProperty(DistributionConfig.DURABLE_CLIENT_ID_NAME, "durable-"+clientNum);
     props.setProperty(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, "300000");
-
-//    props.setProperty("log-file", getTestMethodName()+"_client_" + clientNum + ".log");
     props.setProperty("log-level", "info");
-    props.setProperty("statistic-archive-file", getTestMethodName()+"_client_" + clientNum
-        + ".gfs");
+    props.setProperty("statistic-archive-file", getTestMethodName()+"_client_" + clientNum + ".gfs");
     props.setProperty("statistic-sampling-enabled", "true");
 
     ClientCacheFactory ccf = new ClientCacheFactory(props);
@@ -321,10 +294,9 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
       r.registerInterest("ALL_KEYS", true);
       cache.readyForEvents();
     }
-
   }
 
-  public static void doPuts() throws Exception {
+  private static void doPuts() throws Exception {
     Cache cache = GemFireCacheImpl.getInstance();
     final Region<String, String> r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     Thread t1 = new Thread(new Runnable() {
@@ -357,8 +329,8 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     t2.join();
     t3.join();
   }
-  
-  public static void resumePuts() {
+
+  private static void resumePuts() {
     Cache cache = GemFireCacheImpl.getInstance();
     Region<String, String> r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     for (int i = 0; i < 100; i++) {
@@ -367,7 +339,7 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     r.put("last_key", "last_value");
   }
 
-  public static void waitForLastKey() {
+  private static void waitForLastKey() {
     WaitCriterion wc = new WaitCriterion() {
       @Override
       public boolean done() {
@@ -381,14 +353,12 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     Wait.waitForCriterion(wc, 60*1000, 500, true);
   }
 
-
-  @SuppressWarnings("serial")
-  protected static DistributedMember getMember() throws Exception {
+  private static DistributedMember getMember() throws Exception {
     GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     return cache.getDistributedSystem().getDistributedMember();
   }
 
-  protected static void verifyClientStats(DistributedMember serverMember, int serverPort, int numSubscriptions) {
+  private static void verifyClientStats(DistributedMember serverMember, int serverPort, int numSubscriptions) {
     GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     try {
       ManagementService service = ManagementService.getExistingManagementService(cache);
@@ -401,8 +371,6 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
       
       ClientHealthStatus[] clientStatuses = bean.showAllClientStats();
 
- 
-      
       ClientHealthStatus clientStatus1 = bean.showClientStats(clientIds[0]);
       ClientHealthStatus clientStatus2 = bean.showClientStats(clientIds[1]);
       assertNotNull(clientStatus1);
@@ -416,22 +384,18 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
       assertTrue(clientStatuses.length == 2);
       for (ClientHealthStatus status : clientStatuses) {
         System.out.println("<ExpectedString> ClientStats of the Server is  " + status + "</ExpectedString> ");
-
       }
 
-
       DistributedSystemMXBean dsBean = service.getDistributedSystemMXBean();
       assertEquals(2, dsBean.getNumClients());
       assertEquals(numSubscriptions, dsBean.getNumSubscriptions());
 
     } catch (Exception e) {
-      e.printStackTrace();
-      fail("Error while verifying cache server from remote member " + e);
+      fail("Error while verifying cache server from remote member", e);
     }
-
   }
 
-  protected static void put() {
+  private static void put() {
     Cache cache = GemFireCacheImpl.getInstance();
     Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r1);
@@ -457,11 +421,9 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     } catch (Exception e) {
       // sleep
     }
-
   }
 
-
-  public static void verifyStats(int serverPort) throws Exception {
+  private static void verifyStats(int serverPort) throws Exception {
     Cache cache = GemFireCacheImpl.getInstance();
     ManagementService service = ManagementService.getExistingManagementService(cache);
     CacheServerMXBean serverBean = service.getLocalCacheServerMXBean(serverPort);
@@ -477,5 +439,4 @@ public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
     ClientQueueDetail queueDetails = serverBean.showClientQueueDetails()[0];
     assertEquals(queueDetails.getQueueSize(), ccp.getQueueSizeStat());
   }
-
 }
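
The static-import switch above, from org.junit.Assert.* to com.gemstone.gemfire.test.dunit.Assert.*, is what makes the two-argument fail(message, cause) seen in the hunk resolve, so the caught exception travels with the test failure instead of being flattened into the message string. A hedged sketch of the resulting pattern; verifyRemoteCacheServer() is a hypothetical stand-in for the real verification code.

    import static com.gemstone.gemfire.test.dunit.Assert.*;

    private static void verifyOrFail() {
      try {
        verifyRemoteCacheServer();   // hypothetical verification step
      } catch (Exception e) {
        // the dunit fail overload keeps the original exception as the cause
        fail("Error while verifying cache server from remote member", e);
      }
    }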

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java b/geode-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
index 0c5c75d..59847b9 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
@@ -16,35 +16,49 @@
  */
 package com.gemstone.gemfire.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
 
 import com.gemstone.gemfire.LogWriter;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.statistics.SampleCollector;
-import com.gemstone.gemfire.management.internal.*;
-import com.gemstone.gemfire.test.dunit.*;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import java.util.*;
+import com.gemstone.gemfire.management.internal.FederatingManager;
+import com.gemstone.gemfire.management.internal.LocalManager;
+import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
+import com.gemstone.gemfire.management.internal.ManagementStrings;
+import com.gemstone.gemfire.management.internal.SystemManagementService;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
 
+@SuppressWarnings("serial")
 public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
 
   private static final int MAX_WAIT = 70 * 1000;
 
-  private static final long serialVersionUID = 1L;
   /**
    * log writer instance
    */
@@ -184,7 +198,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
     }
   }
 
-  @SuppressWarnings("serial")
   public void createCache(VM vm1) throws Exception {
     vm1.invoke(new SerializableRunnable("Create Cache") {
       public void run() {
@@ -194,7 +207,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
 
   }
 
-  @SuppressWarnings("serial")
   public void createCache(VM vm1, final Properties props) throws Exception {
     vm1.invoke(new SerializableRunnable("Create Cache") {
       public void run() {
@@ -240,7 +252,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
     return cache;
   }
 
-  @SuppressWarnings("serial")
   public void createManagementCache(VM vm1) throws Exception {
     vm1.invoke(new SerializableRunnable("Create Management Cache") {
       public void run() {
@@ -249,7 +260,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
     });
   }
 
-  @SuppressWarnings("serial")
   public void closeCache(VM vm1) throws Exception {
     vm1.invoke(new SerializableRunnable("Close Cache") {
       public void run() {
@@ -320,7 +330,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
    *
    * @throws Exception
    */
-  @SuppressWarnings("serial")
   public void startManagingNode(VM vm1) throws Exception {
     vm1.invoke(new SerializableRunnable("Start Being Managing Node") {
       public void run() {
@@ -345,7 +354,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
    *
    * @throws Exception
    */
-  @SuppressWarnings("serial")
   public void startManagingNodeAsync(VM vm1) throws Exception {
     vm1.invokeAsync(new SerializableRunnable("Start Being Managing Node") {
 
@@ -367,7 +375,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
    *
    * @throws Exception
    */
-  @SuppressWarnings("serial")
   public void stopManagingNode(VM vm1) throws Exception {
     vm1.invoke(new SerializableRunnable("Stop Being Managing Node") {
       public void run() {
@@ -389,7 +396,6 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
    * remove all the artifacts of management namely a) Notification region b)
    * Monitoring Region c) Management task should stop
    */
-  @SuppressWarnings("serial")
   public void checkManagedNodeCleanup(VM vm) throws Exception {
     vm.invoke(new SerializableRunnable("Managing Node Clean up") {
 

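Side note on the ManagementTestBase cleanup above: the helpers hand anonymous SerializableRunnable instances to vm.invoke(), and that code is serialized and executed in a remote dunit VM; the class-level @SuppressWarnings("serial") added above presumably covers those anonymous classes. A minimal sketch of the pattern, using a hypothetical helper class that is not part of this commit:

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.VM;

    // Hypothetical helper illustrating the dunit pattern used by createCache(VM) etc.
    @SuppressWarnings("serial") // covers the anonymous SerializableRunnable below
    public class ExampleDUnitHelper {

      // The runnable is serialized and shipped to the remote VM, where run() executes.
      public static void createCacheIn(VM vm) {
        vm.invoke(new SerializableRunnable("Create Cache") {
          public void run() {
            Cache cache = new CacheFactory().create();
            cache.getLogger().info("cache created in remote VM");
          }
        });
      }
    }
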
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java b/geode-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
deleted file mode 100644
index f2e6b5a..0000000
--- a/geode-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Class containing pdx tests.
- * 
- */
-package com.gemstone.gemfire.pdx;
-
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.net.URL;
-import java.net.URLClassLoader;
-
-import com.gemstone.gemfire.test.dunit.LogWriterUtils;
-
-public class VersionClassLoader {
-
-  /** If PdxPrms.initClassLoader is true, then randomly choose a versioned
-   *  class path and create and install a class loader for it on this thread.
-   *  
-   * @return The installed class loader (which includes a versioned class path)
-   *         or null if this call did not install a new class loader.
-   */
-  public static ClassLoader initClassLoader(long classVersion) throws Exception {
-    ClassLoader cl = Thread.currentThread().getContextClassLoader();
-    cl = ClassLoader.getSystemClassLoader();
-    
-    String alternateVersionClassPath =  System.getProperty("JTESTS") +
-    File.separator + ".." + File.separator + ".." + File.separator +
-    "classes" + File.separator + "version" + classVersion;
-    LogWriterUtils.getLogWriter().info("Initializing the class loader :" + alternateVersionClassPath);
-    ClassLoader versionCL = null;
-    try {
-      versionCL = new URLClassLoader(new URL[]{new File(alternateVersionClassPath).toURI().toURL()}, cl);
-      Thread.currentThread().setContextClassLoader(versionCL); 
-    } catch (Exception e) {
-      LogWriterUtils.getLogWriter().info("error", e);
-      throw new Exception("Failed to initialize the class loader. " + e.getMessage());
-    }
-    LogWriterUtils.getLogWriter().info("Setting/adding class loader with " + alternateVersionClassPath);
-    return versionCL;
-  }
-
-
-  /** Use reflection to create a new instance of a versioned class whose
-   *  name is specified by className.
-   *  
-   *  Since versioned classes are compled outside outside the <checkoutDir>/tests 
-   *  directory, code within <checkoutDir>/tests cannot directly reference
-   *  versioned classes, however the versioned class should be available at 
-   *  runtime if the test has installed the correct class loader.
-   *
-   * @param className The name of the versioned class to create. 
-   * @return A new instance of className.
-   */
-  public static Object getVersionedInstance(String className, Object[] args) throws Exception {
-    Object newObj = null;
-    try {
-      Class aClass = Class.forName(className, true, Thread.currentThread().getContextClassLoader());
-      if (args != null && args.length > 0) {
-        if (className.endsWith("PdxTestObject")){
-          Constructor constructor = aClass.getConstructor(int.class, String.class);
-          newObj = constructor.newInstance(((Integer)args[0]).intValue(), args[1]);          
-        } else if (className.endsWith("PortfolioPdxVersion1")) {
-          Constructor constructor = aClass.getConstructor(int.class, int.class);
-          newObj = constructor.newInstance(((Integer)args[0]).intValue(), ((Integer)args[1]).intValue());
-        } else if (className.endsWith("PdxVersionedNewPortfolio")) {
-          Constructor constructor = aClass.getConstructor(String.class, int.class);
-          newObj = constructor.newInstance(((String)args[0]), ((Integer)args[1]).intValue());
-        } else if (className.endsWith("PdxVersionedFieldType")) {
-          Constructor constructor = aClass.getConstructor( int.class);
-          newObj = constructor.newInstance(((Integer)args[0]).intValue());
-        } 
-      } else {
-        Constructor constructor = aClass.getConstructor();
-        newObj = constructor.newInstance();
-      }
-    } catch (Exception e) {
-      LogWriterUtils.getLogWriter().info("error", e);
-      throw new Exception("Failed to get the class instance. ClassName: " + className + "  error: ", e);
-    }
-    return newObj;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneEventListenerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneEventListenerJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneEventListenerJUnitTest.java
index 9660393..53c9db1 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneEventListenerJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneEventListenerJUnitTest.java
@@ -22,11 +22,9 @@ import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.*;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.AssertionFailedError;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
@@ -36,7 +34,6 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
 import com.gemstone.gemfire.cache.lucene.internal.repository.IndexRepository;
 import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
-import com.gemstone.gemfire.internal.cache.BucketNotFoundException;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 /**
@@ -47,7 +44,7 @@ import com.gemstone.gemfire.test.junit.categories.UnitTest;
 public class LuceneEventListenerJUnitTest {
 
   @Test
-  public void testProcessBatch() throws IOException, BucketNotFoundException {
+  public void testProcessBatch() throws Exception {
     RepositoryManager manager = Mockito.mock(RepositoryManager.class);
     IndexRepository repo1 = Mockito.mock(IndexRepository.class);
     IndexRepository repo2 = Mockito.mock(IndexRepository.class);
@@ -84,7 +81,7 @@ public class LuceneEventListenerJUnitTest {
         break;
       case 2:
         Mockito.when(event.getOperation()).thenReturn(Operation.DESTROY);
-        Mockito.when(event.getDeserializedValue()).thenThrow(new AssertionFailedError());
+        Mockito.when(event.getDeserializedValue()).thenThrow(new AssertionError());
         break;
       }
 

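For reference, the hunk above (and the matching changes elsewhere in this series) relies on java.lang.AssertionError being an unchecked throwable that JUnit 4 reports as an ordinary test failure, so Mockito can stub it without pulling in junit.framework. A small self-contained sketch of the same stubbing idea; the Value interface is made up for illustration and is not part of the Geode code:

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;
    import org.mockito.Mockito;

    public class AssertionErrorStubExample {

      // Hypothetical collaborator standing in for AsyncEvent in the real test.
      interface Value {
        Object deserialize();
      }

      @Test
      public void stubbedCallThrowsAssertionError() {
        Value value = Mockito.mock(Value.class);
        // AssertionError is unchecked, so thenThrow() accepts it directly.
        Mockito.when(value.deserialize()).thenThrow(new AssertionError("boom"));

        boolean threw = false;
        try {
          value.deserialize();
        } catch (AssertionError expected) {
          threw = true;
        }
        assertTrue("expected the stubbed AssertionError", threw);
      }
    }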

[3/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
index 76d8bc9..6a0e660 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
@@ -16,32 +16,27 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Tests faulting in from current oplog, old oplog
  * and htree for different modes (overflow only, persist+overflow : Sync/Async)
- * 
- *
  */
 @Category(IntegrationTest.class)
-public class FaultingInJUnitTest extends DiskRegionTestingBase
-{
-  protected volatile boolean hasBeenNotified;
-  
-  
+public class FaultingInJUnitTest extends DiskRegionTestingBase {
+
+  private volatile boolean hasBeenNotified;
+
   private DiskRegionProperties diskProps = new DiskRegionProperties();
   
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     deleteFiles();
     diskProps.setDiskDirs(dirs);
     diskProps.setCompactionThreshold(100);
@@ -49,19 +44,16 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
   }
   
-  @After
-  public void tearDown() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeDown();
     deleteFiles();
-    super.tearDown();
   }
 
   /**
    * fault in a value from the current oplog
-   *
    */
-  void faultInFromCurrentOplog()
-  { 
+  private void faultInFromCurrentOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(200), new Integer(200));
@@ -73,10 +65,8 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
 
   /**
    * fault in a value from an old oplog
-   *
    */
-  void faultInFromOldOplog()
-  {
+  private void faultInFromOldOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(200), new Integer(200));
@@ -89,10 +79,8 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
 
   /**
    * fault in a value that has been copied forward by compaction
-   *
    */
-  void faultInFromCompactedOplog()
-  {
+  private void faultInFromCompactedOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(101), new Integer(101));
@@ -147,89 +135,74 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
    * test OverflowOnly Sync Faultin  From CurrentOplog
    */
   @Test
-  public void testOverflowOnlyFaultinSyncFromCurrentOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinSyncFromOldOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinSyncFromCompactedOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromCurrentOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromOldOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromOldOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromCompactedOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
   
   @Test
-  public void testOverflowAndPersistFaultinSyncFromCurrentOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinSyncFromOldOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinSyncFromCompactedOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromCurrentOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromOldOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromCompactedOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
-
-
-  
 }
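
The setUp()/tearDown() overrides removed here (and in the other Disk* tests below) give way to postSetUp()/preTearDown()/postTearDown() hooks, which presumes the shared base class keeps the JUnit-annotated lifecycle methods and calls the hooks at fixed points. A rough sketch of that assumed template-method shape; the hook names mirror the diffs, but the body is inferred rather than copied from DiskRegionTestingBase:

    import org.junit.After;
    import org.junit.Before;

    // Sketch of the assumed lifecycle: subclasses override protected hooks
    // instead of redefining setUp()/tearDown() and calling super explicitly.
    public abstract class ExampleTestingBase {

      @Before
      public final void setUp() throws Exception {
        preSetUp();   // before the shared fixture exists
        // ... shared fixture creation (cache, disk dirs, ...) would go here ...
        postSetUp();  // e.g. deleteFiles(); diskProps.setDiskDirs(dirs);
      }

      @After
      public final void tearDown() throws Exception {
        preTearDown();  // e.g. closeDown(); deleteFiles();
        // ... shared fixture destruction would go here ...
        postTearDown(); // anything that must run after the base cleanup
      }

      protected void preSetUp() throws Exception {}
      protected void postSetUp() throws Exception {}
      protected void preTearDown() throws Exception {}
      protected void postTearDown() throws Exception {}
    }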

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
index 6ac9e60..c7e5d30 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
@@ -16,12 +16,11 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import org.junit.After;
+import static org.junit.Assert.*;
+
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
@@ -29,26 +28,25 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * The test will verify <br>
  * 1. Multiple oplogs are being rolled at once <br>
  * 2. The number of entries getting logged to the HTree is taken care of during creation
- * 
  */
 @Category(IntegrationTest.class)
-public class MultipleOplogsRollingFeatureJUnitTest extends
-    DiskRegionTestingBase
-{
+public class MultipleOplogsRollingFeatureJUnitTest extends DiskRegionTestingBase {
 
-  protected Object mutex = new Object();
+  private volatile boolean FLAG = false;
 
-  protected boolean CALLBACK_SET = false;
+  private Object mutex = new Object();
 
-  protected volatile boolean FLAG = false;
+  private boolean CALLBACK_SET = false;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @After
-  public void tearDown() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
-    super.tearDown();
+  }
+
+  @Override
+  protected final void postTearDown() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 
@@ -58,8 +56,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
    * 2. The Number of entries are properly conflated
    */
   @Test
-  public void testMultipleRolling()
-  {
+  public void testMultipleRolling() {
     System.setProperty("gemfire.MAX_OPLOGS_PER_COMPACTION", "17");
     try {
       deleteFiles();
@@ -161,8 +158,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private void waitForCompactor(long maxWaitingTime)
-  {
+  private void waitForCompactor(long maxWaitingTime) {
     long maxWaitTime = maxWaitingTime;
     long start = System.currentTimeMillis();
     while (!FLAG) { // wait until
@@ -179,8 +175,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private void addEntries(int opLogNum, int valueSize)
-  {
+  private void addEntries(int opLogNum, int valueSize) {
     assertNotNull(region);
     byte[] val = new byte[valueSize];
     for (int i = 0; i < valueSize; ++i) {
@@ -217,8 +212,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private CacheObserver getCacheObserver()
-  {
+  private CacheObserver getCacheObserver() {
     return (new CacheObserverAdapter() {
 
       public void beforeGoingToCompact()
@@ -251,6 +245,5 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
 
       }
     });
-
   }
 }
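
waitForCompactor() above is a bounded poll on the volatile FLAG that the CacheObserverAdapter callback flips when compaction completes. A generic sketch of that wait shape, with illustrative names and timings only:

    // Minimal illustration of the wait pattern used by waitForCompactor():
    // a callback thread sets a volatile flag, and the test thread polls it
    // against a deadline so a missed callback fails fast instead of hanging.
    public class VolatileFlagWaitExample {

      private volatile boolean flag = false;

      public void markDone() {          // called from the observer/callback thread
        flag = true;
      }

      public void awaitDone(long maxWaitMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + maxWaitMillis;
        while (!flag) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("callback did not fire within " + maxWaitMillis + " ms");
          }
          Thread.sleep(50);             // poll interval, small relative to the deadline
        }
      }
    }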

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
index d1be04d..b259b79 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
@@ -117,17 +117,13 @@ public class OplogJUnitTest extends DiskRegionTestingBase {
   protected volatile Thread rollerThread = null;
 
   @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
   }
 
   @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
+  protected final void postTearDown() throws Exception {
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
index 6bac28b..bbd22fa 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
@@ -22,17 +22,16 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
@@ -41,19 +40,16 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * Testing methods for SimpleDiskRegion.java api's
  * 
  * @since 5.1
- *  
  */
 @Category(IntegrationTest.class)
-public class SimpleDiskRegionJUnitTest extends DiskRegionTestingBase
-{
+public class SimpleDiskRegionJUnitTest extends DiskRegionTestingBase {
 
-  protected Set keyIds = Collections.synchronizedSet(new HashSet());
+  private Set keyIds = Collections.synchronizedSet(new HashSet());
 
   private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
index 49e07b4..8b9bbaf 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
@@ -16,41 +16,32 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- * 
  * Disk region Perf test for Overflow only with ASync writes. 1) Performance of
  * get operation for entry in memory.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
 //    Properties properties = new Properties();
     diskProps.setBytesThreshold(10000l);
@@ -61,10 +52,8 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,21 +62,17 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
     }
   }
 
- 
   private static int ENTRY_SIZE = 1024;
   
   /* OP_COUNT can be increased/decreased as per the requirement.
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
-  
   private static int OP_COUNT = 1000;
 
   @Test
-  public void testPopulatefor1Kbwrites()
-  {
+  public void testPopulatefor1Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -129,8 +114,5 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is in memory :" + statsGet);
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
index dcb6af1..be87bc1 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
@@ -16,12 +16,10 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
 
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -40,17 +38,24 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * which will fault in.
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private static int ENTRY_SIZE = 1024 * 5;
+
+  /**
+   * Do not change the value OP_COUNT = 400
+   * The test case is dependent on this value.
+   */
+  private static int OP_COUNT = 400;
+
+  private static int HALF_OP_COUNT = OP_COUNT / 2;
+
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setTimeInterval(1000l);
     diskProps.setBytesThreshold(10000l);
@@ -58,13 +63,10 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
         diskProps);
     log = ds.getLogWriter();
-
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,20 +75,8 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
     }
   }
 
- 
-  private static int ENTRY_SIZE = 1024 * 5;
-  
- /* Do not change the value OP_COUNT = 400
-  * The test case is dependent on this value.
-  */
-  
-  private static int OP_COUNT = 400;
-
-  private static int HALF_OP_COUNT = OP_COUNT / 2;
-
   @Test
-  public void testPopulatefor5Kbwrites()
-  {
+  public void testPopulatefor5Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
     LRUStatistics lruStats = getLRUStats(region);
     // Put in larger stuff until we start evicting
@@ -153,15 +143,10 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is faulting in :" + statsGet);
-
   }
 
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
index 95a456c..223f04e 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
@@ -16,16 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
@@ -34,35 +30,36 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 /**
  * Disk region Perf test for Overflow only with Sync writes. 1) Performance of
  * get operation for entry in memory.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskRegionTestingBase {
+
+  private static int ENTRY_SIZE = 1024;
+
+  private static int OP_COUNT = 10000;
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
+  @Override
+  protected final void preSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
-    super.setUp();
+  }
 
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setOverFlowCapacity(100000);
     region = DiskRegionHelperFactory
-        .getSyncOverFlowOnlyRegion(cache, diskProps);
-    
+      .getSyncOverFlowOnlyRegion(cache, diskProps);
+
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -71,15 +68,8 @@ public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskReg
     }
   }
 
-  
-
-  private static int ENTRY_SIZE = 1024;
-
-  private static int OP_COUNT = 10000;
-
   @Test
-  public void testPopulatefor1Kbwrites()
-  {
+  public void testPopulatefor1Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -122,8 +112,5 @@ public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskReg
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is in memory :" + statsGet);
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
index 208b39f..945a3e5 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
@@ -16,22 +16,20 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import static org.junit.Assert.*;
+
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.LogWriter;
+import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
-import com.gemstone.gemfire.internal.cache.*;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -40,19 +38,26 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * which will fault in.
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBase {
+
+  private static int ENTRY_SIZE = 1024 * 5;
+
+  /**
+   * Do not change the value OP_COUNT = 400
+   * The test case is dependent on this value.
+   */
+  private static int OP_COUNT = 400;
+
+  private static int HALF_OP_COUNT = OP_COUNT / 2;
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setOverFlowCapacity(1000);
     region = DiskRegionHelperFactory
@@ -61,10 +66,8 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,20 +76,8 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
     }
   }
 
- 
-  private static int ENTRY_SIZE = 1024 * 5;
-
-  /* Do not change the value OP_COUNT = 400
-   * The test case is dependent on this value.
-   */
-  
-  private static int OP_COUNT = 400;
-
-  private static int HALF_OP_COUNT = OP_COUNT / 2;
-
   @Test
-  public void testPopulatefor5Kbwrites()
-  {
+  public void testPopulatefor5Kbwrites() throws Exception {
 //    RegionAttributes ra = region.getAttributes();
 
     LRUStatistics lruStats = getLRUStats(region);
@@ -155,15 +146,11 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is faulting in :" + statsGet);
-
   }
 
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
-
   }
 
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
index b014cb2..296ae00 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
@@ -55,10 +55,8 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
 
   DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setRegionName("OverflowAsyncRollingOpLogRegion");
     diskProps.setDiskDirs(dirs);
     this.log = ds.getLogWriter();
@@ -69,13 +67,10 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
     diskProps.setMaxOplogSize(10485760l);
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
         diskProps);
-
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -84,8 +79,6 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
     }
   }
 
-  
-
   @Test
   public void testGetPerfRollingOpog()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
index d94cf73..da0bb5e 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
@@ -51,10 +51,8 @@ public class DiskRegionOverflowSyncRollingOpLogJUnitTest extends
 
   DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     this.log = ds.getLogWriter();
     diskProps.setRolling(true);
@@ -62,16 +60,8 @@ public class DiskRegionOverflowSyncRollingOpLogJUnitTest extends
     diskProps.setCompactionThreshold(100);
     region = DiskRegionHelperFactory
         .getSyncOverFlowOnlyRegion(cache, diskProps);
-
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-    
-  }
-  
   @Test
   public void testGetPerfRollingOpog()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
index 5ec4af8..feef5c7 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
@@ -16,17 +16,15 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
@@ -37,43 +35,28 @@ import com.gemstone.gemfire.test.junit.categories.PerformanceTest;
 /**
  * Consolidated Disk Region Perftest. Overflow, Persist, OverflowWithPersist
  * modes are tested for Sync, AsyncWithBuffer and AsyncWithoutBuffer writes.
- *  
  */
 @Category(PerformanceTest.class)
 @Ignore("Tests have no assertions")
-public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
-{
-  LogWriter log = null;
+public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  String stats = null;
+  private static int counter = 0;
 
-  String stats_ForSameKeyputs = null;
+  private LogWriter log = null;
 
-  static int counter = 0;
+  private String stats = null;
 
-  // protected static File[] dirs = null;
+  private String stats_ForSameKeyputs = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  //*********Test Cases **************
-  //********Overflowonly tests *********
   @Test
-  public void testOverflowSync1()
-  {
+  public void testOverflowSync1() throws Exception {
     try {
       //Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowSync1Dir1");
@@ -116,8 +99,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testOverflowSync1
 
   @Test
-  public void testOverflowASyncWithBuffer2()
-  {
+  public void testOverflowASyncWithBuffer2() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithBuffer2Dir1");
@@ -161,8 +143,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
      } //end of testOverflowASyncWithBuffer2
 
   @Test
-  public void testOverflowASyncWithoutBuffer3()
-  {
+  public void testOverflowASyncWithoutBuffer3() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithoutBuffer3Dir1");
@@ -207,10 +188,8 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testOverflowASyncWithoutBuffer3
 
-  //******** PersistOnly Tests ****************
   @Test
-  public void testpersistSync4()
-  {
+  public void testpersistSync4() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistSync4Dir1");
@@ -252,8 +231,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistSync4
 
   @Test
-  public void testpersistASyncWithBuffer5()
-  {
+  public void testpersistASyncWithBuffer5() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistASyncWithBuffer5Dir1");
@@ -298,8 +276,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistASyncWithBuffer5
 
   @Test
-  public void testPersistASyncWithoutBuffer6()
-  {
+  public void testPersistASyncWithoutBuffer6() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistASyncWithoutBuffer6Dir1");
@@ -345,10 +322,8 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testPersistASyncWithoutBuffer
 
-  //*************Persist with Overflow tests ****************
   @Test
-  public void testPersistOverflowSync7()
-  {
+  public void testPersistOverflowSync7() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowSync7Dir1");
@@ -392,8 +367,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistOverflowSync
 
   @Test
-  public void testPersistOverflowASyncWithBuffer8()
-  {
+  public void testPersistOverflowASyncWithBuffer8() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithBuffer8Dir1");
@@ -440,8 +414,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testpersistOverflowASyncWithBuffer8
 
   @Test
-  public void testPersistOverflowASyncWithoutBuffer9()
-  {
+  public void testPersistOverflowASyncWithoutBuffer9() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithoutBuffer9Dir1");
@@ -487,7 +460,6 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testPersistOverflowASyncWithoutBuffer9
 
-  //************** test data population *******************
   public static int ENTRY_SIZE = 1024;
   
   /**
@@ -495,15 +467,12 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
-  
   public static int OP_COUNT = 100;
 
   public static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
-  public void populateData()
-  {
+  public void populateData() {
     //Put for validation.
     putForValidation(region);
     
@@ -530,8 +499,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     log.info(stats);
   }
 
-  public void populateDataPutOnSameKey()
-  {
+  public void populateDataPutOnSameKey() {
 //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -556,15 +524,13 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     log.info(stats_ForSameKeyputs);
   }
 
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     for (int i = 0; i < 4; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
         files[j].delete();
       }
     }
-
   }
 
 }// end of DiskRegionPerfJUnitTest

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
index 6dd9867..14b0197 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
@@ -16,13 +16,10 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -37,20 +34,16 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Disk region perf test for Persist only with sync writes.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
-{
+public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
@@ -58,29 +51,19 @@ public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
-
-  
-
   private static int ENTRY_SIZE = 1024;
   
   /* OP_COUNT can be increased/decreased as per the requirement.
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
   private static int OP_COUNT = 1000;
 
   private static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
   @Test
-  public void testPopulate1kbwrites()
-  {
+  public void testPopulate1kbwrites() {
     RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -172,8 +155,7 @@ public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testPopulate5kbwrites()
-  {
+  public void testPopulate5kbwrites() {
     ENTRY_SIZE = 1024 * 5;
     
     /* OP_COUNT can be increased/decreased as per the requirement.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
index 1c06543..10a6f20 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
@@ -16,66 +16,52 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import static org.junit.Assert.*;
+
 import java.io.File;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.internal.cache.*;
+import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
+import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
+import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Consolidated Disk Region Perftest. Overflow, Persist, OverflowWithPersist
  * modes are tested for Sync, AsyncWithBuffer and AsyncWithoutBuffer writes.
  * Rolling oplog is set to true with maxOplogSize = 20 MB
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  LogWriter log = null;
+  private LogWriter log = null;
 
-  String stats = null;
+  private String stats = null;
 
-  String stats_ForSameKeyputs = null;
+  private String stats_ForSameKeyputs = null;
 
   /**
    * To run DiskRegionRollOpLogPerfJUnitTest to produce the Perf numbers set
    * runPerfTest to true. Also ,one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
    */
-  boolean runPerfTest = false;
+  private boolean runPerfTest = false;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  //*********Test Cases **************
-  //********Overflowonly tests *********
   @Test
-  public void testOverflowSyncRollOlg1()
-  {
+  public void testOverflowSyncRollOlg1() {
     try {
       //Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowSyncRollOlg1Dir1");
@@ -124,8 +110,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testOverflowSync1
 
   @Test
-  public void testOverflowASyncWithBufferRollOlg2()
-  {
+  public void testOverflowASyncWithBufferRollOlg2() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithBufferRollOlg2Dir1");
@@ -177,8 +162,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testOverflowASyncWithBuffer2
 
   @Test
-  public void testOverflowASyncWithoutBufferRollOlg3()
-  {
+  public void testOverflowASyncWithoutBufferRollOlg3() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithoutBufferRollOlg3Dir1");
@@ -229,10 +213,8 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     deleteFiles();
   } //end of testOverflowASyncWithoutBuffer3
 
-  //******** PersistOnly Tests ****************
   @Test
-  public void testpersistSyncRollOlg4()
-  {
+  public void testpersistSyncRollOlg4() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistSyncRollOlg4Dir1");
@@ -281,8 +263,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistSync4
 
   @Test
-  public void testpersistASyncWithBufferRollOlg5()
-  {
+  public void testpersistASyncWithBufferRollOlg5() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistASyncWithBufferRollOlg5Dir1");
@@ -334,8 +315,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistASyncWithBuffer5
 
   @Test
-  public void testPersistASyncWithoutBufferRollOlg6()
-  {
+  public void testPersistASyncWithoutBufferRollOlg6() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistASyncWithoutBufferRollOlg6Dir1");
@@ -386,10 +366,8 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     closeDown();
   } //end of testPersistASyncWithoutBuffer
 
-  //*************Persist with Overflow tests ****************
   @Test
-  public void testPersistOverflowSyncRollOlg7()
-  {
+  public void testPersistOverflowSyncRollOlg7() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowSyncRollOlg7Dir1");
@@ -440,8 +418,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistOverflowSync
 
   @Test
-  public void testPersistOverflowASyncWithBufferRollOlg8()
-  {
+  public void testPersistOverflowASyncWithBufferRollOlg8() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithBufferRollOlg8Dir1");
@@ -494,8 +471,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testpersistOverflowASyncWithBuffer8
 
   @Test
-  public void testPersistOverflowASyncWithoutBufferRollOlg9()
-  {
+  public void testPersistOverflowASyncWithoutBufferRollOlg9() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithoutBufferRollOlg9Dir1");
@@ -547,7 +523,6 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     closeDown();
   } //end of testPersistOverflowASyncWithoutBuffer9
 
-  //************** test data population *******************
   public static int ENTRY_SIZE = 1024;
 
   /**
@@ -555,15 +530,12 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   * be set to a higher value such as 1000000, one needs to set the VM heap size
   * accordingly. (For example, the default setting in build.xml is <jvmarg
   * value="-Xmx256M"/>.)
-   *  
    */
-
   public static int OP_COUNT = 1000;
 
   public static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
-  public void populateData0to60k()
-  {
+  public void populateData0to60k() {
     final byte[] value = new byte[ENTRY_SIZE];
     Arrays.fill(value, (byte)77);
     for (int i = 0; i < 60000; i++) {
@@ -571,11 +543,9 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
       // System.out.println(i);
     }
     System.out.println(" done with putting first 60k entries");
-
   }
 
-  public void populateData60kto100k()
-  {
+  public void populateData60kto100k() {
     //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -599,8 +569,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     log.info(stats);
   }
 
-  public void populateDataPutOnSameKey()
-  {
+  public void populateDataPutOnSameKey() {
     //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -624,15 +593,12 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     log.info(stats_ForSameKeyputs);
   }
 
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     for (int i = 0; i < 4; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
         files[j].delete();
       }
     }
-
   }
-
 }// end of DiskRegionRollOpLogPerfJUnitTest

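The setup change at the top of the diff above replaces an overridden JUnit @Before setUp() (which had to remember to call super.setUp()) with an @Override of a postSetUp() hook provided by the test base class. The following is only a minimal sketch of that template-method pattern; BaseTestCase is a hypothetical stand-in, not the real DiskRegionTestingBase API:

    import org.junit.Before;

    // Sketch only: BaseTestCase stands in for the framework's test base class.
    public abstract class BaseTestCase {

      @Before
      public final void setUp() throws Exception {
        // framework-owned setup (directories, distributed system, ...) runs first
        postSetUp(); // then the subclass hook runs
      }

      // Subclasses override this instead of redefining setUp(), so they can no
      // longer forget to invoke the base-class setup.
      protected void postSetUp() throws Exception {
        // default: no extra per-test setup
      }
    }
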
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
index 6c572b1..61c9b45 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
+import static com.gemstone.gemfire.test.dunit.Assert.*;
+
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -26,8 +28,6 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import junit.framework.Assert;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
@@ -45,39 +45,27 @@ import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
 import com.gemstone.gemfire.test.junit.categories.FlakyTest;
 
-public class HARegionQueueDUnitTest extends DistributedTestCase {
-  VM vm0 = null;
-
-  VM vm1 = null;
-
-  VM vm3 = null;
+public class HARegionQueueDUnitTest extends JUnit4DistributedTestCase {
 
-  VM vm2 = null;
+  private static volatile boolean toCnt = true;
+  private static volatile Thread createQueuesThread;
 
-  protected static Cache cache = null;
+  private static Cache cache = null;
+  private static HARegionQueue hrq = null;
+  private static Thread[] opThreads;
 
-  protected static HARegionQueue hrq = null;
-
-//  private static int counter = 0;
-
-  protected static volatile boolean toCnt = true;
-
-  protected static Thread opThreads[];
-  
-  protected static volatile Thread createQueuesThread;
-
-  /** constructor */
-  public HARegionQueueDUnitTest(String name) {
-    super(name);
-  }
+  private VM vm0 = null;
+  private VM vm1 = null;
+  private VM vm3 = null;
+  private VM vm2 = null;
 
   /**
    * get the VM's
@@ -100,13 +88,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(() -> HARegionQueueDUnitTest.closeCache());
     vm2.invoke(() -> HARegionQueueDUnitTest.closeCache());
     vm3.invoke(() -> HARegionQueueDUnitTest.closeCache());
+    
+    cache = null;
+    hrq = null;
+    opThreads = null;
   }
 
   /**
    * create cache
    */
-  protected Cache createCache() throws CacheException
-  {
+  private Cache createCache() throws CacheException {
     Properties props = new Properties();
     DistributedSystem ds = getSystem(props);
     ds.disconnect();
@@ -125,10 +116,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    * assert that the put has not propagated from VM1 to VM2 4) do a put in VM2
   * 5) assert that the value in VM1 has not changed due to the put in VM2 6)
    * assert put in VM2 was successful by doing a get
-   *
    */
-  public void testLocalPut()
-  {
+  @Test
+  public void testLocalPut() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
@@ -146,10 +136,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * assert the respective puts in the VMs were successful by doing a get 7)
    * localDestroy key in VM1 8) assert key has been destroyed in VM1 9) assert
    * key has not been destroyed in VM2
-   *
    */
-  public void testLocalDestroy()
-  {
+  @Test
+  public void testLocalDestroy() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
@@ -167,10 +156,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * value in VM1 to assert the put has happened successfully 4) Create mirrored
   * HARegion region1 in VM2 5) do a get in VM2 to verify that the value was
   * obtained through GII 6) do a put in VM2 7) assert the put in VM2 was successful
-   *
    */
-  public void testGII()
-  {
+  @Test
+  public void testGII() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
     vm0.invoke(() -> HARegionQueueDUnitTest.getValue1());
@@ -178,37 +166,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(() -> HARegionQueueDUnitTest.getValue1());
     vm1.invoke(() -> HARegionQueueDUnitTest.putValue2());
     vm1.invoke(() -> HARegionQueueDUnitTest.getValue2());
-
   }
 
   /**
-   * Tests the relevant data structures are updated after GII happens.
-   *
-   * In this test, a HARegion is created in vm0. 10 conflatable objects are put
-   * in vm0's region HARegion is then created in vm1. After region creation, the
-   * verification whether the relevant data structuers have been updated is
-   * done.
-   *
-   */
- /* public void testGIIAndMapUpdates()
-  {
-    vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue2());
-    vm0.invoke(() -> HARegionQueueDUnitTest.putConflatables());
-    vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueue2());
-    vm0.invoke(() -> HARegionQueueDUnitTest.clearRegion());
-    vm1.invoke(() -> HARegionQueueDUnitTest.verifyMapsAndData());
-
-  } */
-
-  /**
   * 1) Create mirrored HARegion region1 in VM1 2) do a put in VM1 3) get the
   * value in VM1 to assert the put has happened successfully 4) Create mirrored
   * HARegion region1 in VM2 5) do a get in VM2 to verify that the value was
   * obtained through GII 6) do a put in VM2 7) assert the put in VM2 was successful
-   *
    */
-  public void testQRM()
-  {
+  @Test
+  public void testQRM() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm0.invoke(() -> HARegionQueueDUnitTest.verifyAddingDispatchMesgs());
@@ -217,29 +184,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1)Create regionqueue on VM0 and VM1 2) put same conflated object from VM1
-   * aand VM2 3)perform take() operation from VM0 4) Wait for the QRM to
-   * execute. 4)check the size of the regionqueue in VM1. It should be zero
-   * because QRM should remove entry from the regionqueue of VM1
-   * 
-   * 
-   */
-  
-  /**
   * Behaviour of take() has been changed for the reliable messaging feature. Region queue take()
    * operation will no longer add to the Dispatch Message Map. Hence disabling the test - SUYOG
    *
    * Test for #35988 HARegionQueue.take() is not functioning as expected
    */
-  @Ignore("TODO")
+  @Ignore("TODO: this test was disabled")
   @Test
   public void testBugNo35988() throws Exception {
-    
-    CacheSerializableRunnable createQueue = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue and start thread") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+    CacheSerializableRunnable createQueue = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
        // TODO:ASIF: Because the QRM thread cannot take a frequency below
        // 1 second, we need to carefully evaluate what to do. Though
        // in this case 1 second instead of 500 ms will work
@@ -256,8 +212,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               new byte[] { 0 }, 1, 1), false, "dummy"));
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
@@ -265,28 +220,28 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(createQueue);
 
     vm0.invoke(new CacheSerializableRunnable("takeFromVm0") {
+      @Override
       public void run2() throws CacheException {
         try {
           Conflatable obj = (Conflatable)hrq.take();
           assertNotNull(obj);
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     });
 
-
-
     vm1.invoke(new CacheSerializableRunnable("checkInVm1") {
-      public void run2() throws CacheException
-      {
+      @Override
+      public void run2() throws CacheException {
         WaitCriterion ev = new WaitCriterion() {
+          @Override
           public boolean done() {
             Thread.yield(); // TODO is this necessary?
             return hrq.size() == 0;
           }
+          @Override
           public String description() {
             return null;
           }
@@ -299,32 +254,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
   /**
    * create a client with 2 regions sharing a common writer
-   *
-   * @throws Exception
    */
-
-  public static void createRegion() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegion() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
-    HARegion.getInstance("HARegionQueueDUnitTest_region", (GemFireCacheImpl)cache,
-        null, factory.create());
+    HARegion.getInstance("HARegionQueueDUnitTest_region", (GemFireCacheImpl)cache, null, factory.create());
   }
 
-  /**
-   *
-   *
-   * @throws Exception
-   */
-
-  public static void createRegionQueue() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueue() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     /*
      * AttributesFactory factory = new AttributesFactory();
@@ -342,13 +283,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         "HARegionQueueDUnitTest_region");
     hrq.put(c1);
     hrq.put(c2);
-
   }
 
-  public static void createRegionQueue2() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueue2() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     /*
      * AttributesFactory factory = new AttributesFactory();
@@ -362,8 +300,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
   }
 
-  public static void clearRegion()
-  {
+  private static void clearRegion() {
     try {
       Iterator iterator = hrq.getRegion().keys().iterator();
       while (iterator.hasNext()) {
@@ -371,31 +308,31 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      fail("Exception occured while trying to destroy region");
+      fail("Exception occured while trying to destroy region", e);
     }
 
   }
 
-  public static void verifyAddingDispatchMesgs()
-  {
-    Assert.assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting()
+  private static void verifyAddingDispatchMesgs() {
+    assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting()
         .isEmpty());
     hrq.addDispatchedMessage(new ThreadIdentifier(new byte[1], 1), 1);
-    Assert.assertTrue(!HARegionQueue.getDispatchedMessagesMapForTesting()
+    assertTrue(!HARegionQueue.getDispatchedMessagesMapForTesting()
         .isEmpty());
   }
 
-  public static void verifyDispatchedMessagesRemoved()
+  private static void verifyDispatchedMessagesRemoved()
   {
     try {
       final Region region = hrq.getRegion();
-      // wait until we have a dead
-      // server
+      // wait until we have a dead server
       WaitCriterion ev = new WaitCriterion() {
+        @Override
         public boolean done() {
           Thread.yield(); // TODO is this necessary?
           return region.get(new Long(0)) == null;
         }
+        @Override
         public String description() {
           return null;
         }
@@ -413,16 +350,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception e) {
-      fail("test failed due to an exception :  " + e);
+      fail("test failed due to an exception", e);
     }
   }
 
   /**
    * close the cache
-   * 
    */
-  public static void closeCache()
-  {
+  private static void closeCache() {
     if (cache != null && !cache.isClosed()) {
       cache.close();
       cache.getDistributedSystem().disconnect();
@@ -431,22 +366,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
   /**
    * do puts on key-1
-   *
    */
-  public static void putValue1()
-  {
+  private static void putValue1() {
     try {
       Region r1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       r1.put("key-1", "value-1");
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
-  public static void putConflatables()
-  {
+  private static void putConflatables() {
     try {
       Region r1 = hrq.getRegion();
       for (int i = 1; i < 11; i++) {
@@ -456,54 +387,51 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * verifies the data has been populated correctly after GII
-   *
    */
-  public static void verifyMapsAndData()
-  {
+  private static void verifyMapsAndData() {
     try {
       HARegion r1 = (HARegion)hrq.getRegion();
       // region should not be null
-      Assert.assertNotNull(" Did not expect the HARegion to be null but it is",
+      assertNotNull(" Did not expect the HARegion to be null but it is",
           r1);
       // it should have ten non null entries
       for (int i = 1; i < 11; i++) {
-        Assert.assertNotNull(" Did not expect the entry to be null but it is",
+        assertNotNull(" Did not expect the entry to be null but it is",
             r1.get(new Long(i)));
       }
       // HARegionQueue should not be null
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the HARegionQueue to be null but it is", hrq);
 
       Map conflationMap = hrq.getConflationMapForTesting();
       // conflationMap size should be greater than 0
-      Assert.assertTrue(
+      assertTrue(
           " Did not expect the conflationMap size to be 0 but it is",
           conflationMap.size() > 0);
       Map internalMap = (Map)conflationMap.get("HARegionQueueDUnitTest_region");
       // internal map should not be null. it should be present
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the internalMap to be null but it is", internalMap);
       // get and verify the entries in the conflation map.
       for (int i = 1; i < 11; i++) {
-        Assert.assertTrue(
+        assertTrue(
             " Did not expect the entry not to be equal but it is", internalMap
                 .get("key" + i).equals(new Long(i)));
       }
       Map eventMap = hrq.getEventsMapForTesting();
       // DACE should not be null
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the result (DACE object) to be null but it is",
           eventMap.get(new ThreadIdentifier(new byte[] { 1 }, 1)));
       Set counterSet = hrq.getCurrentCounterSet(new EventID(new byte[] { 1 },
           1, 1));
-      Assert.assertTrue(
+      assertTrue(
           " excpected the counter set size to be 10 but it is not so",
           counterSet.size() == 10);
       long i = 1;
@@ -511,12 +439,12 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       // verify the order of the iteration. it should be 1 - 10. The underlying
       // set is a LinkedHashSet
       while (iterator.hasNext()) {
-        Assert.assertTrue(((Long)iterator.next()).longValue() == i);
+        assertTrue(((Long)iterator.next()).longValue() == i);
         i++;
       }
      // The last dispatched sequence Id should be -1 since no dispatch has
       // been made
-      Assert.assertTrue(hrq.getLastDispatchedSequenceId(new EventID(
+      assertTrue(hrq.getLastDispatchedSequenceId(new EventID(
           new byte[] { 1 }, 1, 1)) == -1);
 
      // sleep for 7.5 seconds. Everything should expire and everything should
@@ -524,8 +452,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       Thread.sleep(7500);
 
       for (int j = 1; j < 11; j++) {
-        Assert
-            .assertNull(
+        assertNull(
                 "expected the entry to be null since expiry time exceeded but it is not so",
                 r1.get(new Long(j)));
       }
@@ -533,50 +460,41 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       internalMap = (Map)hrq.getConflationMapForTesting().get(
           "HARegionQueueDUnitTest_region");
 
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the internalMap to be null but it is", internalMap);
-      Assert
-          .assertTrue(
+      assertTrue(
               "internalMap (conflation) should have been emptry since expiry of all entries has been exceeded but it is not so",
               internalMap.isEmpty());
-      Assert
-          .assertTrue(
+      assertTrue(
               "eventMap should have been emptry since expiry of all entries has been exceeded but it is not so",
               eventMap.isEmpty());
-      Assert
-          .assertTrue(
+      assertTrue(
               "counter set should have been emptry since expiry of all entries has been exceeded but it is not so",
               counterSet.isEmpty());
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * do puts on key-1,value-2
-   *
    */
-  public static void putValue2()
-  {
+  private static void putValue2() {
     try {
       Region r1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       r1.put("key-1", "value-2");
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getValue1()
-  {
+  private static void getValue1() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1").equals("value-1"))) {
@@ -585,17 +503,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getNull()
-  {
+  private static void getNull() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1") == (null))) {
@@ -604,17 +519,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getValue2()
-  {
+  public static void getValue2() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1").equals("value-2"))) {
@@ -623,24 +535,20 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * destroy key-1
-   *
    */
-  public static void destroy()
-  {
+  public static void destroy() {
     try {
       Region region1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       region1.localDestroy("key-1");
     }
     catch (Exception e) {
-      e.printStackTrace();
-      fail("test failed due to exception in destroy ");
+      fail("test failed due to exception in destroy", e);
     }
   }
 
@@ -649,11 +557,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
   * hangs or exceptions in the non-blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnNonBlockingQueue()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnNonBlockingQueue() throws Exception {
     concurrentOperationsDunitTest(false, Scope.DISTRIBUTED_ACK);
   }
 
@@ -662,11 +568,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
   * hangs or exceptions in the non-blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnNonBlockingQueueWithDNoAckRegion()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnNonBlockingQueueWithDNoAckRegion() throws Exception {
     concurrentOperationsDunitTest(false, Scope.DISTRIBUTED_NO_ACK);
   }
 
@@ -675,25 +579,19 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
   * hangs or exceptions in the blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnBlockingQueue()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnBlockingQueue() throws Exception {
     concurrentOperationsDunitTest(true, Scope.DISTRIBUTED_ACK);
   }
 
-  private void concurrentOperationsDunitTest(
-      final boolean createBlockingQueue, final Scope rscope)
-  {
+  private void concurrentOperationsDunitTest(final boolean createBlockingQueue, final Scope rscope) {
     // Create Cache and HARegionQueue in all the 4 VMs.
 
-    CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable(
-        "CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-            "HARegionQueueDUnitTest_region");
+    CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable("CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
         System.getProperties()
             .put("QueueRemovalThreadWaitTime", "2000");
         cache = test.createCache();
@@ -713,12 +611,11 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
           }
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
         factory.addCacheListener(new CacheListenerAdapter() {
-          public void afterCreate(final EntryEvent event)
-          {
+          @Override
+          public void afterCreate(final EntryEvent event) {
             Conflatable conflatable = new ConflatableObject(event.getKey(),
                 event.getNewValue(), ((EntryEventImpl)event).getEventId(),
                 false, event.getRegion().getFullPath());
@@ -727,14 +624,12 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               hrq.put(conflatable);
             }
             catch (Exception e) {
-              e.printStackTrace();
-              fail("The put operation in queue did not succeed due to exception ="
-                  + e);
+              fail("The put operation in queue did not succeed due to exception =", e);
             }
           }
 
-          public void afterUpdate(final EntryEvent event)
-          {
+          @Override
+          public void afterUpdate(final EntryEvent event) {
             Conflatable conflatable = new ConflatableObject(event.getKey(),
                 event.getNewValue(), ((EntryEventImpl)event).getEventId(),
                 true, event.getRegion().getFullPath());
@@ -743,9 +638,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               hrq.put(conflatable);
             }
             catch (Exception e) {
-              e.printStackTrace();
-              fail("The put operation in queue did not succeed due to exception ="
-                  + e);
+              fail("The put operation in queue did not succeed due to exception =", e);
             }
           }
 
@@ -760,11 +653,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(createRgnsAndQueues);
     vm2.invoke(createRgnsAndQueues);
     vm3.invoke(createRgnsAndQueues);
-    CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable(
-        "Spawn multipe threads which do various operations") {
+    CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable("Spawn multiple threads which do various operations") {
 
-      public void run2() throws CacheException
-      {
+      @Override
+      public void run2() throws CacheException {
         opThreads = new Thread[4 + 2 + 2 + 2];
         for (int i = 0; i < 4; ++i) {
           opThreads[i] = new Thread(new RunOp(RunOp.PUT, i), "ID="
@@ -801,13 +693,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       Thread.sleep(2000);
     }
     catch (InterruptedException e1) {
-      fail("Test failed as the test thread encoutered exception in sleep");
+      fail("Test failed as the test thread encoutered exception in sleep", e1);
     }
+
     // Asif : In case of blocking HARegionQueue do some extra puts so that the
     // blocking threads
     // are exited
-    CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable(
-        "Toggle the flag to signal end of threads") {
+    CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable("Toggle the flag to signal end of threads") {
+      @Override
       public void run2() throws CacheException {
         toCnt = false;
         if (createBlockingQueue) {
@@ -818,8 +711,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
             }
           }
           catch (Exception e) {
-            throw new CacheException(e) {
-            };
+            throw new AssertionError(e);
           }
         }
 
@@ -830,16 +722,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invokeAsync(toggleFlag);
     vm2.invokeAsync(toggleFlag);
     vm3.invokeAsync(toggleFlag);
-//     try {
-//       Thread.sleep(5000);
-//     }
-//     catch (InterruptedException e2) {
-//       fail("Test failed as the test thread encoutered exception in sleep");
-//     }
-    CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable(
-        "Join with the threads") {
-      public void run2() throws CacheException
-      {
+
+    CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable("Join with the threads") {
+      @Override
+      public void run2() throws CacheException {
         for (int i = 0; i < opThreads.length; ++i) {
 
           if (opThreads[i].isInterrupted()) {
@@ -861,20 +747,21 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * This is to test the bug which is caused when the HARegionQueue object has not
   * been fully constructed but, as the HARegion has been constructed, it becomes
   * visible to the QRM Message Thread.
+   *
+   * TODO: this test runs too long! Shorten run time. 1m 40s on new Mac.
    */
   @Category(FlakyTest.class) // GEODE-690: async queuing, time sensitive, expiration, waitForCriterion, joins
-  public void testNPEDueToHARegionQueueEscapeInConstructor()
-  {
+  @Test
+  public void testNPEDueToHARegionQueueEscapeInConstructor() {
     // changing EXPIRY_TIME to 5 doesn't change how long the test runs!
     final int EXPIRY_TIME = 30; // test will run for this many seconds
      // Create two HARegionQueues in the two VMs. The frequency of the QRM thread
      // should be high.
      // Check for NullPointerException in the other VM.
-    CacheSerializableRunnable createQueuesAndThread = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue and start thread") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+    CacheSerializableRunnable createQueuesAndThread = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
        // TODO:ASIF: Because the QRM thread cannot take a frequency below
        // 1 second, we need to carefully evaluate what to do.
        // For this bug to appear without the bugfix, the QRM needs to run
@@ -897,8 +784,8 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
           }
           opThreads = new Thread[1];
           opThreads[0] = new Thread(new Runnable() {
-            public void run()
-            {
+            @Override
+            public void run() {
               for (int i = 0; i < OP_COUNT; ++i) {
                 try {
                   Object o = hrq.take();
@@ -907,7 +794,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
                   }
                 }
                 catch (InterruptedException e) {
-                  fail("interrupted");
+                  throw new AssertionError(e);
                 }
               }
             }
@@ -916,18 +803,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
 
-    CacheSerializableRunnable createQueues = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue ") {
-      public void run2() throws CacheException
-      {
+    CacheSerializableRunnable createQueues = new CacheSerializableRunnable("CreateCache, HARegionQueue ") {
+      @Override
+      public void run2() throws CacheException {
         createQueuesThread = Thread.currentThread();
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
         //System.getProperties().put("QueueRemovalThreadWaitTime",
          //   new Long(120000));
         cache = test.createCache();
@@ -940,19 +825,20 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
     
-    CacheSerializableRunnable waitForCreateQueuesThread = new CacheSerializableRunnable(
-        "joinCreateCache") {
+    CacheSerializableRunnable waitForCreateQueuesThread = new CacheSerializableRunnable("joinCreateCache") {
+      @Override
       public void run2() {
         WaitCriterion ev = new WaitCriterion() {
+          @Override
           public boolean done() {
             return createQueuesThread != null;
           }
+          @Override
           public String description() {
             return null;
           }
@@ -965,10 +851,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm0.invoke(createQueuesAndThread);
     vm1.invokeAsync(createQueues);
 
-    CacheSerializableRunnable joinWithThread = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue join with thread") {
-      public void run2() throws CacheException
-      {
+    CacheSerializableRunnable joinWithThread = new CacheSerializableRunnable("CreateCache, HARegionQueue join with thread") {
+      @Override
+      public void run2() throws CacheException {
         if (opThreads[0].isInterrupted()) {
           fail("The test has failed as it encountered interrupts in puts & takes");
         }
@@ -979,28 +864,23 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(waitForCreateQueuesThread);
   }
 
-  class RunOp implements Runnable
-  {
+  private static class RunOp implements Runnable {
 
-    int opType;
+    private static final int PUT = 1;
+    private static final int TAKE = 2;
+    private static final int PEEK = 3;
+    private static final int BATCH_PEEK = 4;
 
-    int threadID;
-
-    public static final int PUT = 1;
-
-    public static final int TAKE = 2;
-
-    public static final int PEEK = 3;
-
-    public static final int BATCH_PEEK = 4;
+    private int opType;
+    private int threadID;
 
     public RunOp(int opType, int id) {
       this.opType = opType;
       this.threadID = id;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       Region rgn = cache.getRegion("test_region");
       int counter = 0;
       LogWriter logger = cache.getLogger();
@@ -1052,35 +932,17 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         }
       }
       catch (Exception e) {
-        Thread.currentThread().interrupt();
+        throw new AssertionError(e);
       }
     }
   }
 
   /**
-   * This is to test the bug which is caused when HARegionQueue object hasnot
-   * been fully constructed but as the HARegion has got constructed , it gets
-   * visible to expiry thread task causing NullPointerException in some
-   * situations.
-   *
-   */
- /* public void testBugNo35989()
-  {
-    vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
-    vm1.invoke(() -> HARegionQueueDUnitTest.createHARegionQueueandCheckExpiration());
-
-  } */
-
-  /**
   * Checks that the data received by GII only gets expired after proper
   * construction of the HARegionQueue object.
-   *
-   * @throws Exception
    */
-  public static void createHARegionQueueandCheckExpiration() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createHARegionQueueandCheckExpiration() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     HARegionQueueAttributes attrs = new HARegionQueueAttributes();
     attrs.setExpiryTime(1);
@@ -1090,9 +952,11 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    // wait until all the available ids have expired
     WaitCriterion ev = new WaitCriterion() {
+      @Override
       public boolean done() {
         return hrq.getAvalaibleIds().size() == 0;
       }
+      @Override
       public String description() {
         return null;
       }
@@ -1101,21 +965,17 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     // assertIndexDetailsEquals(0, hrq.getAvalaibleIds().size());
   }
 
-  public void testForDuplicateEvents()
-  {
+  @Test
+  public void testForDuplicateEvents() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueueandCheckDuplicates());
   }
 
   /**
    *  HARegionQueue should not allow data with duplicate EventIds.
-   *
-   * @throws Exception
    */
-  public static void createRegionQueueandCheckDuplicates() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueueandCheckDuplicates() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
 
     hrq = HARegionQueue.getHARegionQueueInstance("HARegionQueueDUnitTest_region", cache,



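A recurring change in the hunks above is how helper failures are reported: checked exceptions that used to vanish into anonymous CacheException subclasses, printStackTrace() calls, or message-only fail(...) strings are now rethrown as AssertionError or handed to a fail(String, Throwable) overload so the original cause stays attached. The following is only a self-contained sketch of that idea; the fail helper shown here is illustrative and not the project's API:

    // Sketch only: preserve the original cause when a test helper fails.
    public final class FailureReportingSketch {

      // Illustrative stand-in for a static fail(String, Throwable) helper.
      static void fail(String message, Throwable cause) {
        throw new AssertionError(message, cause); // keeps the full stack trace
      }

      static void riskyHelper() throws Exception {
        throw new Exception("simulated failure");
      }

      public static void main(String[] args) {
        try {
          riskyHelper();
        } catch (Exception e) {
          // Instead of swallowing e or appending it to the message string,
          // rethrow it as the cause so the reported failure shows where it began.
          fail("helper failed", e);
        }
      }
    }
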
[6/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
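
In the MultiVMRegionTestCase diff that follows, guard clauses that silently returned when a capability was missing are converted to JUnit assumptions, so unsupported combinations show up as skipped instead of passing vacuously. This is only a minimal sketch of that idiom, independent of the Geode test classes; the featureSupported() check is made up for illustration:

    import static org.junit.Assume.assumeTrue;

    import org.junit.Test;

    public class AssumptionSketchTest {

      // Illustrative capability check; the real tests use methods such as
      // supportsNetLoad() or supportsReplication().
      private boolean featureSupported() {
        return Boolean.getBoolean("sketch.featureSupported");
      }

      @Test
      public void runsOnlyWhenFeatureIsSupported() {
        // A failed assumption makes JUnit mark the test as skipped rather than
        // letting an early return report a vacuous pass.
        assumeTrue(featureSupported());

        // ... real test body would go here ...
      }
    }
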
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index 8648b86..61baf52 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -17,7 +17,8 @@
 package com.gemstone.gemfire.cache30;
 
 import static com.gemstone.gemfire.internal.lang.ThrowableUtils.*;
-import static org.junit.Assert.*;
+import static com.gemstone.gemfire.test.dunit.Assert.*;
+import static org.junit.Assume.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInput;
@@ -41,8 +42,6 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.LinkedBlockingQueue;
 
-import junit.framework.Assert;
-import junit.framework.AssertionFailedError;
 import org.apache.logging.log4j.Logger;
 import org.junit.AfterClass;
 import org.junit.Ignore;
@@ -134,21 +133,21 @@ import com.gemstone.gemfire.test.junit.categories.FlakyTest;
 public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
   private static final Logger logger = LogService.getLogger();
-  
-  Properties props = new Properties();
-
-  final int putRange_1Start = 1;
-  final int putRange_1End = 5;
-  final int putRange_2Start = 6;
-  final int putRange_2End = 10;
-  final int putRange_3Start = 11;
-  final int putRange_3End = 15;
-  final int putRange_4Start = 16;
-  final int putRange_4End = 20;
-  final int removeRange_1Start = 2;
-  final int removeRange_1End = 4;
-  final int removeRange_2Start = 7;
-  final int removeRange_2End = 9;
+
+  private Properties props = new Properties();
+
+  private final int putRange_1Start = 1;
+  private final int putRange_1End = 5;
+  private final int putRange_2Start = 6;
+  private final int putRange_2End = 10;
+  private final int putRange_3Start = 11;
+  private final int putRange_3End = 15;
+  private final int putRange_4Start = 16;
+  private final int putRange_4End = 20;
+  private final int removeRange_1Start = 2;
+  private final int removeRange_1End = 4;
+  private final int removeRange_2Start = 7;
+  private final int removeRange_2End = 9;
 
   @AfterClass
   public static void caseTearDown() throws Exception {
@@ -333,7 +332,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * one VM updates the value in another VM.
    */
   @Test
-  public void testDistributedUpdate() {
+  public void testDistributedUpdate() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -401,11 +400,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    */
   @Ignore("DISABLED 4-16-04 - the current implementation assumes events are processed synchronously, which is no longer true")
   @Test
-  public void testOrderedUpdates() throws Throwable {
-    if (getRegionAttributes().getScope() ==
-        Scope.DISTRIBUTED_NO_ACK) {
-      return;
-    }
+  public void testOrderedUpdates() throws Exception {
+    assumeFalse(getRegionAttributes().getScope() == Scope.DISTRIBUTED_NO_ACK);
 
     final String name = this.getUniqueName();
     final Object key = "KEY";
@@ -443,7 +439,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   queue.put(value);
 
                 } catch (InterruptedException ex) {
-                  com.gemstone.gemfire.test.dunit.Assert.fail("Why was I interrupted?", ex);
+                  fail("Why was I interrupted?", ex);
                 }
               }
             });
@@ -465,7 +461,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 assertEquals(i, value.intValue());
 
               } catch (InterruptedException ex) {
-                com.gemstone.gemfire.test.dunit.Assert.fail("Why was I interrupted?", ex);
+                fail("Why was I interrupted?", ex);
               }
             }
           }
@@ -487,10 +483,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     ThreadUtils.join(ai1, 30 * 1000);
 
     if (ai0.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("ai0 failed", ai0.getException());
+      fail("ai0 failed", ai0.getException());
 
     } else if (ai1.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("ai1 failed", ai1.getException());
+      fail("ai1 failed", ai1.getException());
     }
   }
 
@@ -499,7 +495,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * <code>netSearch</code>.
    */
   @Test
-  public void testDistributedGet() {
+  public void testDistributedGet() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -534,9 +530,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * not have that key defined.
    */
   @Test
-  public void testDistributedPutNoUpdate()
-    throws InterruptedException {
-
+  public void testDistributedPutNoUpdate() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -591,7 +585,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * second VM should see the updated value.
    */
   @Test
-  public void testDefinedEntryUpdated() {
+  public void testDefinedEntryUpdated() throws Exception {
     final String name = this.getUniqueName();
     final Object key = "KEY";
     final Object oldValue = "OLD_VALUE";
@@ -648,7 +642,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * propagated to all VMs that define that entry.
    */
   @Test
-  public void testDistributedDestroy() throws InterruptedException {
+  public void testDistributedDestroy() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -739,9 +733,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * propagated to all VMs that define that region.
    */
   @Test
-  public void testDistributedRegionDestroy()
-    throws InterruptedException {
-
+  public void testDistributedRegionDestroy() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -789,10 +781,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * other VMs that define that entry.
    */
   @Test
-  public void testLocalDestroy() throws InterruptedException {
-    if (!supportsLocalDestroyAndLocalInvalidate()) {
-      return;
-    }
+  public void testLocalDestroy() throws Exception {
+    assumeTrue(supportsLocalDestroyAndLocalInvalidate());
+
     // test not valid for persistBackup region since they have to be
     // mirrored KEYS_VALUES
     if (getRegionAttributes().getDataPolicy().withPersistence()) return;
@@ -859,9 +850,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * to other VMs that define that region.
    */
   @Test
-  public void testLocalRegionDestroy()
-    throws InterruptedException {
-
+  public void testLocalRegionDestroy() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -908,7 +897,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * propagated to all VMs that define that entry.
    */
   @Test
-  public void testDistributedInvalidate() {
+  public void testDistributedInvalidate() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -984,7 +973,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * in multiple VMs does not cause any problems.
    */
   @Test
-  public void testDistributedInvalidate4() throws InterruptedException {
+  public void testDistributedInvalidate4() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1057,11 +1046,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * region is propagated to all VMs that define that entry.
    */
   @Test
-  public void testDistributedRegionInvalidate()
-    throws InterruptedException {
-    if (!supportsSubregions()) {
-      return;
-    }
+  public void testDistributedRegionInvalidate() throws Exception {
+    assumeTrue(supportsSubregions());
+
     final String name = this.getUniqueName();
     final String subname = "sub";
     final boolean useSubs = getRegionAttributes().getPartitionAttributes() == null;
@@ -1152,7 +1139,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    */
   @Category(FlakyTest.class) // GEODE-153 & GEODE-932: time sensitive, waitForInvocation (waitForCriterion), 3 second timeouts
   @Test
-  public void testRemoteCacheListener() throws InterruptedException {
+  public void testRemoteCacheListener() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1206,7 +1193,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 try {
                   assertEquals(newValue, DataSerializer.readObject(dis));
                 } catch (Exception e) {
-                  com.gemstone.gemfire.test.dunit.Assert.fail("Unexpected Exception", e);
+                  fail("Unexpected Exception", e);
                 }
               }
             };
@@ -1259,7 +1246,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 try {
                   assertEquals(newValue, DataSerializer.readObject(dis));
                 } catch (Exception e) {
-                  com.gemstone.gemfire.test.dunit.Assert.fail("Unexpected Exception", e);
+                  fail("Unexpected Exception", e);
                 }
               }
             };
@@ -1378,15 +1365,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
   }
 
-
   /**
    * Tests that a {@link CacheListener} is invoked in a remote VM.
    */
   @Test
-  public void testRemoteCacheListenerInSubregion() throws InterruptedException {
-    if (!supportsSubregions()) {
-      return;
-    }
+  public void testRemoteCacheListenerInSubregion() throws Exception {
+    assumeTrue(supportsSubregions());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1482,10 +1467,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * essentially tests <code>netLoad</code>.
    */
   @Test
-  public void testRemoteCacheLoader() throws InterruptedException {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testRemoteCacheLoader() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1546,10 +1530,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * is actually passed.
    */
   @Test
-  public void testRemoteCacheLoaderArg() throws InterruptedException {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testRemoteCacheLoaderArg() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1628,10 +1611,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * CacheLoaderException} results is propagated back to the caller.
    */
   @Test
-  public void testRemoteCacheLoaderException() throws InterruptedException {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testRemoteCacheLoaderException() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1694,12 +1676,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
   }
 
-
   @Test
-  public void testCacheLoaderWithNetSearch() throws CacheException {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testCacheLoaderWithNetSearch() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     // some tests use mirroring by default (e.g. persistBackup regions)
     // if so, then this test won't work right
     if (getRegionAttributes().getDataPolicy().withReplication()
@@ -1748,7 +1728,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertEquals(value, result);
             return result;
           } catch (TimeoutException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
+            fail("Why did I time out?", ex);
           }
           return null;
         }
@@ -1790,11 +1770,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     assertEquals(value, region.getEntry(key).getValue());
   }
 
-
   @Test
-  public void testCacheLoaderWithNetLoad() throws CacheException {
-
-
+  public void testCacheLoaderWithNetLoad() throws Exception {
     // replicated regions and partitioned regions make no sense for this
     // test
     if (getRegionAttributes().getDataPolicy().withReplication() ||
@@ -1843,7 +1820,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertEquals(value, result);
             return result;
           } catch (TimeoutException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
+            fail("Why did I time out?", ex);
           }
           return null;
         }
@@ -1877,7 +1854,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * there is no remote loader.
    */
   @Test
-  public void testNoRemoteCacheLoader() throws InterruptedException {
+  public void testNoRemoteCacheLoader() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -1912,10 +1889,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * value).
    */
   @Test
-  public void testNoLoaderWithInvalidEntry() {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testNoLoaderWithInvalidEntry() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     final String name = this.getUniqueName();
     final Object key = "KEY";
     final Object value = "VALUE";
@@ -1974,7 +1950,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * CacheWriterException}s are propagated appropriately.
    */
   @Test
-  public void testRemoteCacheWriter() throws InterruptedException {
+  public void testRemoteCacheWriter() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -2305,7 +2281,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
           } catch (CacheWriterException ex) {
             if (region.isDestroyed()) {
-              com.gemstone.gemfire.test.dunit.Assert.fail("should not have an exception if region is destroyed", ex);
+              fail("should not have an exception if region is destroyed", ex);
             }
             assertEquals(1, region.size());
             if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
@@ -2360,9 +2336,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * is invoked instead of a remote one.
    */
   @Test
-  public void testLocalAndRemoteCacheWriters()
-    throws InterruptedException {
-
+  public void testLocalAndRemoteCacheWriters() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -2458,9 +2432,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * <code>CacheWriter</code> even if it is in another VM.
    */
   @Test
-  public void testCacheLoaderModifyingArgument()
-    throws InterruptedException {
-
+  public void testCacheLoaderModifyingArgument() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -2568,10 +2540,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * returns <code>null</code> instead of causing infinite recursion.
    */
   @Test
-  public void testRemoteLoaderNetSearch() throws CacheException {
-    if (!supportsNetLoad()) {
-      return;
-    }
+  public void testRemoteLoaderNetSearch() throws Exception {
+    assumeTrue(supportsNetLoad());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -2602,7 +2573,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   assertNull(helper.netSearch(true));
 
                 } catch (TimeoutException ex) {
-                  com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
+                  fail("Why did I time out?", ex);
                 }
                 return value;
               }
@@ -2617,7 +2588,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * Tests that a local loader is preferred to a remote one
    */
   @Test
-  public void testLocalCacheLoader() {
+  public void testLocalCacheLoader() throws Exception {
     final String name = this.getUniqueName();
     final Object key = "KEY";
     final Object value = "VALUE";
@@ -2691,7 +2662,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testDistributedPut: Created Region");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -2708,7 +2679,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
          }
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -2870,10 +2841,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * DataPolicy#REPLICATE}.
    */
   @Test
-  public void testReplicate() throws InterruptedException {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testReplicate() throws Exception {
+    assumeTrue(supportsReplication());
+
     //pauseIfNecessary(100); // wait for previous tearDown to complete
 
     final String name = this.getUniqueName();
@@ -2999,10 +2969,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * region.
    */
   @Test
-  public void testDeltaWithReplicate() throws InterruptedException {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testDeltaWithReplicate() throws Exception {
+    assumeTrue(supportsReplication());
+
     //pauseIfNecessary(100); // wait for previous tearDown to complete
     
     final String name = this.getUniqueName();
@@ -3102,17 +3071,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     
   }
   
-  
-  
   /**
    * Tests that a newly-created mirrored region contains all of the
    * entries of another region.
    */
   @Test
-  public void testGetInitialImage() {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
+
     final String name = this.getUniqueName();
     final Object key1 = "KEY1";
     final Object value1 = "VALUE1";
@@ -3185,19 +3151,18 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
   }
 
-
   private static final int CHUNK_SIZE = 500 * 1024; // == InitialImageOperation.CHUNK_SIZE_IN_BYTES
   private static final int NUM_ENTRIES = 100;
   private static final int VALUE_SIZE = CHUNK_SIZE * 10 / NUM_ENTRIES;
+
   /**
    * Tests that a newly-created mirrored region contains all of the
    * entries of another region, with a large quantity of data.
    */
   @Test
-  public void testLargeGetInitialImage() {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testLargeGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
+
     final String name = this.getUniqueName();
     final Integer[] keys = new Integer[NUM_ENTRIES];
     final byte[][] values = new byte[NUM_ENTRIES][];
@@ -3274,11 +3239,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * non-mirrored region and the afterCreate event is invoked on a listener.
    */
   @Test
-  public void testMirroredDataFromNonMirrored()
-    throws InterruptedException {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testMirroredDataFromNonMirrored() throws Exception {
+    assumeTrue(supportsReplication());
 
     final String name = this.getUniqueName();
     final Object key1 = "KEY1";
@@ -3393,11 +3355,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * region.
    */
   @Test
-  public void testNoMirroredDataToNonMirrored()
-    throws InterruptedException {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testNoMirroredDataToNonMirrored() throws Exception {
+    assumeTrue(supportsReplication());
 
     final String name = this.getUniqueName();
     final Object key1 = "KEY1";
@@ -3477,10 +3436,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * Tests that a local load occurs, even with mirroring
    */
   @Test
-  public void testMirroredLocalLoad() {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testMirroredLocalLoad() throws Exception {
+    assumeTrue(supportsReplication());
+
     final String name = this.getUniqueName();
     final Object key = "KEY";
     final Object value = "VALUE";
@@ -3555,10 +3513,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * mirroring
    */
   @Test
-  public void testMirroredNetLoad() {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testMirroredNetLoad() throws Exception {
+    assumeTrue(supportsReplication());
+
     final String name = this.getUniqueName();
     final Object key = "KEY";
     final Object value = "VALUE";
@@ -3613,13 +3570,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
   }
 
-  ////////  Region Keep Alive Tests
-
   /**
    * Tests that a region is not kept alive
    */
   @Test
-  public void testNoRegionKeepAlive() throws InterruptedException {
+  public void testNoRegionKeepAlive() throws Exception {
     final String name = this.getUniqueName();
     final Object key = "KEEP_ALIVE_KEY";
     final Object value = "VALUE";
@@ -3662,15 +3617,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
   }
 
-
-
   @Test
-  public void testNetSearchObservesTtl()
-  throws InterruptedException
-  {
-    if(getRegionAttributes().getPartitionAttributes() != null)
-      return;
-    
+  public void testNetSearchObservesTtl() throws Exception {
+    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
+
     final String name = this.getUniqueName();
     final int shortTimeout = 10; // ms
     final int longTimeout = 1000000; // ms
@@ -3755,12 +3705,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
   }
 
   @Test
-  public void testNetSearchObservesIdleTime()
-  throws InterruptedException
-  {
-    if(getRegionAttributes().getPartitionAttributes() != null)
-      return;
-    
+  public void testNetSearchObservesIdleTime() throws Exception {
+    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
+
     final String name = this.getUniqueName();
     final int shortTimeout = 10; // ms
     final int longTimeout = 10000; // ms
@@ -3846,8 +3793,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     });
   }
 
-
-  static TestCacheListener destroyListener = null;
+  private static TestCacheListener destroyListener = null;
 
   /**
    * Tests that an entry in a distributed region that expires with a distributed
@@ -3855,157 +3801,155 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    */
   @Category(FlakyTest.class) // GEODE-583: time sensitive, expiration, waitForCriterion, short timeouts
   @Test
-  public void testEntryTtlDestroyEvent() throws InterruptedException {
-      
-      if(getRegionAttributes().getPartitionAttributes() != null)
-        return;
-      
-      final String name = this.getUniqueName();
-      final int timeout = 22; // ms
-      final Object key = "KEY";
-      final Object value = "VALUE";
+  public void testEntryTtlDestroyEvent() throws Exception {
+    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
 
-      Host host = Host.getHost(0);
-      VM vm0 = host.getVM(0);
-      VM vm1 = host.getVM(1);
+    final String name = this.getUniqueName();
+    final int timeout = 22; // ms
+    final Object key = "KEY";
+    final Object value = "VALUE";
 
-      class DestroyListener extends TestCacheListener {
-        boolean eventIsExpiration = false;
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
 
-        public void afterDestroyBeforeAddEvent(EntryEvent event) {
-          eventIsExpiration = event.isExpiration();
-        }
-        public void afterDestroy2(EntryEvent event) {
-          if (event.isOriginRemote()) {
-            assertTrue(!event.getDistributedMember().equals(getSystem().getDistributedMember()));
-          } else {
-            assertEquals(getSystem().getDistributedMember(), event.getDistributedMember());
-          }
-          assertEquals(Operation.EXPIRE_DESTROY, event.getOperation());
-          assertEquals(value, event.getOldValue());
-          eventIsExpiration = event.getOperation().isExpiration();
-        }
+    class DestroyListener extends TestCacheListener {
+      boolean eventIsExpiration = false;
 
-        public void afterCreate2(EntryEvent event) {
-          // ignore
+      public void afterDestroyBeforeAddEvent(EntryEvent event) {
+        eventIsExpiration = event.isExpiration();
+      }
+      public void afterDestroy2(EntryEvent event) {
+        if (event.isOriginRemote()) {
+          assertTrue(!event.getDistributedMember().equals(getSystem().getDistributedMember()));
+        } else {
+          assertEquals(getSystem().getDistributedMember(), event.getDistributedMember());
         }
+        assertEquals(Operation.EXPIRE_DESTROY, event.getOperation());
+        assertEquals(value, event.getOldValue());
+        eventIsExpiration = event.getOperation().isExpiration();
+      }
 
-        public void afterUpdate2(EntryEvent event) {
-          // ignore
-        }
+      public void afterCreate2(EntryEvent event) {
+        // ignore
+      }
+
+      public void afterUpdate2(EntryEvent event) {
+        // ignore
+      }
+    }
+
+
+    SerializableRunnable createRegion = new CacheSerializableRunnable("Create with Listener") {
+      public void run2() throws CacheException {
+        AttributesFactory fac = new AttributesFactory(getRegionAttributes());
+        fac.addCacheListener(destroyListener = new DestroyListener());
+        createRegion(name, fac.create());
       }
+    };
 
+    vm1.invoke(createRegion);
 
-      SerializableRunnable createRegion = new CacheSerializableRunnable("Create with Listener") {
+    vm0.invoke(new CacheSerializableRunnable("Create with TTL") {
         public void run2() throws CacheException {
-          AttributesFactory fac = new AttributesFactory(getRegionAttributes());
-          fac.addCacheListener(destroyListener = new DestroyListener());
-          createRegion(name, fac.create());
+          AttributesFactory factory = new AttributesFactory(getRegionAttributes());
+          factory.setStatisticsEnabled(true);
+          ExpirationAttributes expire =
+            new ExpirationAttributes(timeout,
+                                     ExpirationAction.DESTROY);
+          factory.setEntryTimeToLive(expire);
+          if (!getRegionAttributes().getDataPolicy().withReplication()) {
+            factory.setDataPolicy(DataPolicy.NORMAL);
+            factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
+          }
+          System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
+          try {
+            createRegion(name, factory.create());
+            ExpiryTask.suspendExpiration();
+            // suspend to make sure we can see that the put is distributed to this member
+          }
+          finally {
+            System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
+          }
         }
-      };
+      });
 
-      vm1.invoke(createRegion);
+    try {
 
-      vm0.invoke(new CacheSerializableRunnable("Create with TTL") {
-          public void run2() throws CacheException {
-            AttributesFactory factory = new AttributesFactory(getRegionAttributes());
-            factory.setStatisticsEnabled(true);
-            ExpirationAttributes expire =
-              new ExpirationAttributes(timeout,
-                                       ExpirationAction.DESTROY);
-            factory.setEntryTimeToLive(expire);
-            if (!getRegionAttributes().getDataPolicy().withReplication()) {
-              factory.setDataPolicy(DataPolicy.NORMAL);
-              factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
-            }
-            System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
-            try {
-              createRegion(name, factory.create());
-              ExpiryTask.suspendExpiration();
-              // suspend to make sure we can see that the put is distributed to this member
-            } 
-            finally {
-              System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
-            }
-          }
-        });
-      
-      try {
+    // let region create finish before doing put
+    //pause(10);
 
-      // let region create finish before doing put
-      //pause(10);
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region region = getRootRegion().getSubregion(name);
+        DestroyListener dl = (DestroyListener)region.getAttributes().getCacheListeners()[0];
+        dl.enableEventHistory();
+        region.put(key, value);
+        // reset listener after create event
+        assertTrue(dl.wasInvoked());
+        List<CacheEvent> history = dl.getEventHistory();
+        CacheEvent ce = history.get(0);
+        dl.disableEventHistory();
+        assertEquals(Operation.CREATE, ce.getOperation());
+        return null;
+      }
+    });
+    vm0.invoke(new CacheSerializableRunnable("Check create received from vm1") {
+      public void run2() throws CacheException {
+        final Region region = getRootRegion().getSubregion(name);
+        WaitCriterion waitForCreate = new WaitCriterion() {
+          public boolean done() {
+            return region.getEntry(key) != null;
+          }
+          public String description() {
+            return "never saw create of " + key;
+          }
+        };
+        Wait.waitForCriterion(waitForCreate, 3000, 10, true);
+      }
+    });
 
-      vm1.invoke(new SerializableCallable() {
-        public Object call() throws Exception {
-          Region region = getRootRegion().getSubregion(name);
-          DestroyListener dl = (DestroyListener)region.getAttributes().getCacheListeners()[0];
-          dl.enableEventHistory();
-          region.put(key, value);
-          // reset listener after create event
-          assertTrue(dl.wasInvoked());
-          List<CacheEvent> history = dl.getEventHistory();
-          CacheEvent ce = history.get(0);
-          dl.disableEventHistory();
-          assertEquals(Operation.CREATE, ce.getOperation());
-          return null;
+    } finally {
+      vm0.invoke(new CacheSerializableRunnable("resume expiration") {
+        public void run2() throws CacheException {
+          ExpiryTask.permitExpiration();
         }
       });
-      vm0.invoke(new CacheSerializableRunnable("Check create received from vm1") {
+    }
+
+    // now wait for it to expire
+    vm0.invoke(new CacheSerializableRunnable("Check local destroy") {
         public void run2() throws CacheException {
           final Region region = getRootRegion().getSubregion(name);
-          WaitCriterion waitForCreate = new WaitCriterion() {
+          WaitCriterion waitForExpire = new WaitCriterion() {
             public boolean done() {
-              return region.getEntry(key) != null;
+              return region.getEntry(key) == null;
             }
             public String description() {
-              return "never saw create of " + key;
+              return "never saw expire of " + key + " entry=" + region.getEntry(key);
             }
           };
-          Wait.waitForCriterion(waitForCreate, 3000, 10, true);
+          Wait.waitForCriterion(waitForExpire, 4000, 10, true);
         }
       });
-      
-      } finally {
-        vm0.invoke(new CacheSerializableRunnable("resume expiration") {
-          public void run2() throws CacheException {
-            ExpiryTask.permitExpiration();
-          }
-        });
-      }
-      
-      // now wait for it to expire
-      vm0.invoke(new CacheSerializableRunnable("Check local destroy") {
-          public void run2() throws CacheException {
-            final Region region = getRootRegion().getSubregion(name);
-            WaitCriterion waitForExpire = new WaitCriterion() {
-              public boolean done() {
-                return region.getEntry(key) == null;
-              }
-              public String description() {
-                return "never saw expire of " + key + " entry=" + region.getEntry(key);
-              }
-            };
-            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
-          }
-        });
 
-      vm1.invoke(new CacheSerializableRunnable("Verify destroyed and event") {
-          public void run2() throws CacheException {
-            final Region region = getRootRegion().getSubregion(name);
-            WaitCriterion waitForExpire = new WaitCriterion() {
-              public boolean done() {
-                return region.getEntry(key) == null;
-              }
-              public String description() {
-                return "never saw expire of " + key + " entry=" + region.getEntry(key);
-              }
-            };
-            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
-            assertTrue(destroyListener.waitForInvocation(555));
-            assertTrue(((DestroyListener)destroyListener).eventIsExpiration);
-          }
-        });
-    }
+    vm1.invoke(new CacheSerializableRunnable("Verify destroyed and event") {
+        public void run2() throws CacheException {
+          final Region region = getRootRegion().getSubregion(name);
+          WaitCriterion waitForExpire = new WaitCriterion() {
+            public boolean done() {
+              return region.getEntry(key) == null;
+            }
+            public String description() {
+              return "never saw expire of " + key + " entry=" + region.getEntry(key);
+            }
+          };
+          Wait.waitForCriterion(waitForExpire, 4000, 10, true);
+          assertTrue(destroyListener.waitForInvocation(555));
+          assertTrue(((DestroyListener)destroyListener).eventIsExpiration);
+        }
+      });
+  }
 
   /**
    * Tests that an entry in a distributed region expires with a local
@@ -4013,261 +3957,258 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    */
   @Category(FlakyTest.class) // GEODE-671: time sensitive, expiration, retry loop, async actions, waitForCriterion
   @Test
-  public void testEntryTtlLocalDestroy() throws InterruptedException {
-      if(getRegionAttributes().getPartitionAttributes() != null)
-        return;
-      final boolean mirrored = getRegionAttributes().getDataPolicy().withReplication();
-      final boolean partitioned = getRegionAttributes().getPartitionAttributes() != null ||
-           getRegionAttributes().getDataPolicy().withPartitioning();
-      if (!mirrored) {
-        // This test fails intermittently because the DSClock we inherit from the existing
-        // distributed system is stuck in the "stopped" state.
-        // The DSClock is going away when java groups is merged and at that
-        // time this following can be removed.
-        disconnectAllFromDS();
-      }
-
-      final String name = this.getUniqueName();
-      final int timeout = 10; // ms
-      final Object key = "KEY";
-      final Object value = "VALUE";
-
-      Host host = Host.getHost(0);
-      VM vm0 = host.getVM(0);
-      VM vm1 = host.getVM(1);
-
-      SerializableRunnable create = new CacheSerializableRunnable("Populate") {
-        public void run2() throws CacheException {
-        System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
-        try {
-          Region region = createRegion(name);
-        }
-        finally {
-          System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
-        }
+  public void testEntryTtlLocalDestroy() throws Exception {
+    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
+
+    final boolean mirrored = getRegionAttributes().getDataPolicy().withReplication();
+    final boolean partitioned = getRegionAttributes().getPartitionAttributes() != null ||
+         getRegionAttributes().getDataPolicy().withPartitioning();
+    if (!mirrored) {
+      // This test fails intermittently because the DSClock we inherit from the existing
+      // distributed system is stuck in the "stopped" state.
+      // The DSClock is going away when java groups is merged and at that
+      // time the following can be removed.
+      disconnectAllFromDS();
+    }
+
+    final String name = this.getUniqueName();
+    final int timeout = 10; // ms
+    final Object key = "KEY";
+    final Object value = "VALUE";
+
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    SerializableRunnable create = new CacheSerializableRunnable("Populate") {
+      public void run2() throws CacheException {
+      System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
+      try {
+        Region region = createRegion(name);
       }
-      };
+      finally {
+        System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
+      }
+    }
+    };
 
-      vm1.invoke(create);
-      
-      vm0.invoke(new CacheSerializableRunnable("Create with TTL") {
-          public void run2() throws CacheException {
-            AttributesFactory factory = new AttributesFactory(getRegionAttributes());
-            factory.setStatisticsEnabled(true);
-            ExpirationAttributes expire =
-              new ExpirationAttributes(timeout,
-                                       ExpirationAction.LOCAL_DESTROY);
-            factory.setEntryTimeToLive(expire);
-            if (!mirrored) {
-              // make it cached all events so that remote creates will also
-              // be created here
-              if(!partitioned){
-               factory.setDataPolicy(DataPolicy.NORMAL);
-              }
-              factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
-              factory.addCacheListener(new CountingDistCacheListener());
-            }
-            /**
-             * Crank up the expiration so test runs faster.
-             * This property only needs to be set while the region is created
-             */
-            System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
-            try {
-              createRegion(name, factory.create());
-              if (mirrored) fail("Should have thrown an IllegalStateException");
-            }
-            catch (IllegalStateException e) {
-              if (!mirrored) throw e;
-            } 
-            finally {
-              System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
+    vm1.invoke(create);
+
+    vm0.invoke(new CacheSerializableRunnable("Create with TTL") {
+        public void run2() throws CacheException {
+          AttributesFactory factory = new AttributesFactory(getRegionAttributes());
+          factory.setStatisticsEnabled(true);
+          ExpirationAttributes expire =
+            new ExpirationAttributes(timeout,
+                                     ExpirationAction.LOCAL_DESTROY);
+          factory.setEntryTimeToLive(expire);
+          if (!mirrored) {
+            // make it cache all events so that remote creates will also
+            // be created here
+            if(!partitioned){
+             factory.setDataPolicy(DataPolicy.NORMAL);
             }
+            factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
+            factory.addCacheListener(new CountingDistCacheListener());
+          }
+          /**
+           * Crank up the expiration so the test runs faster.
+           * This property only needs to be set while the region is created.
+           */
+          System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
+          try {
+            createRegion(name, factory.create());
+            if (mirrored) fail("Should have thrown an IllegalStateException");
+          }
+          catch (IllegalStateException e) {
+            if (!mirrored) throw e;
+          }
+          finally {
+            System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
           }
-        });
-      if (mirrored) return;
-
-      vm1.invoke(new SerializableCallable() {
-        public Object call() throws Exception {
-          Region region = getRootRegion().getSubregion(name);
-          region.put(key, value);
-          return null;
         }
       });
-      
-      vm0.invoke(new CacheSerializableRunnable("Check local destroy") {
-          public void run2() throws CacheException {
-            final Region region =
-              getRootRegion().getSubregion(name);
-            // make sure we created the entry
-            {
-              CountingDistCacheListener l = (CountingDistCacheListener)
-                region.getAttributes().getCacheListeners()[0];
-              int retry = 1000;
-              while (retry-- > 0) {
-                try {
-                  l.assertCount(1, 0, 0, 0);
-                  // TODO: a race exists in which assertCount may also see a destroyCount of 1
-                  logger.info("DEBUG: saw create");
-                  break;
-                } catch (AssertionFailedError e) {
-                  if (retry > 0) {
-                    Wait.pause(1);
-                  } else {
-                    throw e;
-                  }
+    if (mirrored) return;
+
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region region = getRootRegion().getSubregion(name);
+        region.put(key, value);
+        return null;
+      }
+    });
+
+    vm0.invoke(new CacheSerializableRunnable("Check local destroy") {
+        public void run2() throws CacheException {
+          final Region region =
+            getRootRegion().getSubregion(name);
+          // make sure we created the entry
+          {
+            CountingDistCacheListener l = (CountingDistCacheListener)
+              region.getAttributes().getCacheListeners()[0];
+            int retry = 1000;
+            while (retry-- > 0) {
+              try {
+                l.assertCount(1, 0, 0, 0);
+                // TODO: a race exists in which assertCount may also see a destroyCount of 1
+                logger.info("DEBUG: saw create");
+                break;
+              } catch (AssertionError e) {
+                if (retry > 0) {
+                  Wait.pause(1);
+                } else {
+                  throw e;
                 }
               }
             }
+          }
 
-            { // now make sure it expires
-              // this should happen really fast since timeout is 10 ms.
-              // But it may take longer in some cases because of thread
-              // scheduling delays and machine load (see GEODE-410).
-              // The previous code would fail after 100ms; now we wait 3000ms.
-              WaitCriterion waitForUpdate = new WaitCriterion() {
-                public boolean done() {
-                  Region.Entry re = region.getEntry(key);
-                  if (re != null) {
-                    EntryExpiryTask eet = getEntryExpiryTask(region, key);
-                    if (eet != null) {
-                      long stopTime = ((InternalDistributedSystem)(region.getCache().getDistributedSystem())).getClock().getStopTime();
-                      logger.info("DEBUG: waiting for expire destroy expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " stopTime=" + stopTime + " currentTimeMillis=" + System.currentTimeMillis());
-                    } else {
-                      logger.info("DEBUG: waiting for expire destroy but expiry task is null");
-                    }
+          { // now make sure it expires
+            // this should happen really fast since timeout is 10 ms.
+            // But it may take longer in some cases because of thread
+            // scheduling delays and machine load (see GEODE-410).
+            // The previous code would fail after 100ms; now we wait 3000ms.
+            WaitCriterion waitForUpdate = new WaitCriterion() {
+              public boolean done() {
+                Region.Entry re = region.getEntry(key);
+                if (re != null) {
+                  EntryExpiryTask eet = getEntryExpiryTask(region, key);
+                  if (eet != null) {
+                    long stopTime = ((InternalDistributedSystem)(region.getCache().getDistributedSystem())).getClock().getStopTime();
+                    logger.info("DEBUG: waiting for expire destroy expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " stopTime=" + stopTime + " currentTimeMillis=" + System.currentTimeMillis());
+                  } else {
+                    logger.info("DEBUG: waiting for expire destroy but expiry task is null");
                   }
-                  return re == null;
                 }
-                public String description() {
-                  String expiryInfo = "";
-                  try {
-                    EntryExpiryTask eet = getEntryExpiryTask(region, key);
-                    if (eet != null) {
-                      expiryInfo = "expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " currentTimeMillis=" + System.currentTimeMillis();
-                    }
-                  } catch (EntryNotFoundException ex) {
-                    expiryInfo ="EntryNotFoundException when getting expiry task";
+                return re == null;
+              }
+              public String description() {
+                String expiryInfo = "";
+                try {
+                  EntryExpiryTask eet = getEntryExpiryTask(region, key);
+                  if (eet != null) {
+                    expiryInfo = "expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " currentTimeMillis=" + System.currentTimeMillis();
                   }
-                  return "Entry for key " + key + " never expired (since it still exists) " + expiryInfo;
+                } catch (EntryNotFoundException ex) {
+                  expiryInfo ="EntryNotFoundException when getting expiry task";
                 }
-              };
-              Wait.waitForCriterion(waitForUpdate, 30000, 1, true);
-            }
-            assertNull(region.getEntry(key));
+                return "Entry for key " + key + " never expired (since it still exists) " + expiryInfo;
+              }
+            };
+            Wait.waitForCriterion(waitForUpdate, 30000, 1, true);
           }
-        });
+          assertNull(region.getEntry(key));
+        }
+      });
 
-      vm1.invoke(new CacheSerializableRunnable("Verify local") {
-          public void run2() throws CacheException {
-            Region region =
-              getRootRegion().getSubregion(name);
-            Region.Entry entry = region.getEntry(key);
-            assertEquals(value, entry.getValue());
-          }
-        });
-    }
+    vm1.invoke(new CacheSerializableRunnable("Verify local") {
+        public void run2() throws CacheException {
+          Region region =
+            getRootRegion().getSubregion(name);
+          Region.Entry entry = region.getEntry(key);
+          assertEquals(value, entry.getValue());
+        }
+      });
+  }
     
-    private static EntryExpiryTask getEntryExpiryTask(Region r, Object key) {
-      EntryExpiryTask result = null;
-      try {
-        LocalRegion lr = (LocalRegion) r;
-        result = lr.getEntryExpiryTask(key);
-      } catch (EntryNotFoundException ignore) {
-      }
-      return result;
+  private static EntryExpiryTask getEntryExpiryTask(Region r, Object key) {
+    EntryExpiryTask result = null;
+    try {
+      LocalRegion lr = (LocalRegion) r;
+      result = lr.getEntryExpiryTask(key);
+    } catch (EntryNotFoundException ignore) {
     }
+    return result;
+  }
 
-    /**
-     * Tests to makes sure that a distributed update resets the
-     * expiration timer.
-     */
+  /**
+   * Tests to make sure that a distributed update resets the
+   * expiration timer.
+   */
   @Test
-  public void testUpdateResetsIdleTime() throws InterruptedException {
-
-      final String name = this.getUniqueName();
-      // test no longer waits for this timeout to expire
-      final int timeout = 90; // seconds
-      final Object key = "KEY";
-      final Object value = "VALUE";
-
-      Host host = Host.getHost(0);
-      VM vm0 = host.getVM(0);
-      VM vm1 = host.getVM(1);
-
+  public void testUpdateResetsIdleTime() throws Exception {
+    final String name = this.getUniqueName();
+    // test no longer waits for this timeout to expire
+    final int timeout = 90; // seconds
+    final Object key = "KEY";
+    final Object value = "VALUE";
 
-      vm0.invoke(new CacheSerializableRunnable("Create with Idle") {
-        public void run2() throws CacheException {
-          AttributesFactory factory = new AttributesFactory(getRegionAttributes());
-          factory.setStatisticsEnabled(true);
-          ExpirationAttributes expire =
-              new ExpirationAttributes(timeout,
-                  ExpirationAction.DESTROY);
-          factory.setEntryIdleTimeout(expire);
-          LocalRegion region =
-              (LocalRegion) createRegion(name, factory.create());
-          if (region.getDataPolicy().withPartitioning()) {
-            // Force all buckets to be created locally so the
-            // test will know that the create happens in this vm
-            // and the update (in vm1) is remote.
-            PartitionRegionHelper.assignBucketsToPartitions(region);
-          }
-          region.create(key, null);
-          EntryExpiryTask eet = region.getEntryExpiryTask(key);
-          region.create("createExpiryTime", eet.getExpirationTime());
-          Wait.waitForExpiryClockToChange(region);
-        }
-      });
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
 
-      vm1.invoke(new CacheSerializableRunnable("Create Region " + name) {
-        public void run2() throws CacheException {
-          AttributesFactory factory = new AttributesFactory(getRegionAttributes());
-          factory.setStatisticsEnabled(true);
-          ExpirationAttributes expire =
-              new ExpirationAttributes(timeout,
-                  ExpirationAction.DESTROY);
-          factory.setEntryIdleTimeout(expire);
-          if(getRegionAttributes().getPartitionAttributes() != null){
-            createRegion(name, factory.create());  
-          } else {
-            createRegion(name);
-          }          
-        }
-      });
+    vm0.invoke(new CacheSerializableRunnable("Create with Idle") {
+      public void run2() throws CacheException {
+        AttributesFactory factory = new AttributesFactory(getRegionAttributes());
+        factory.setStatisticsEnabled(true);
+        ExpirationAttributes expire =
+            new ExpirationAttributes(timeout,
+                ExpirationAction.DESTROY);
+        factory.setEntryIdleTimeout(expire);
+        LocalRegion region =
+            (LocalRegion) createRegion(name, factory.create());
+        if (region.getDataPolicy().withPartitioning()) {
+          // Force all buckets to be created locally so the
+          // test will know that the create happens in this vm
+          // and the update (in vm1) is remote.
+          PartitionRegionHelper.assignBucketsToPartitions(region);
+        }
+        region.create(key, null);
+        EntryExpiryTask eet = region.getEntryExpiryTask(key);
+        region.create("createExpiryTime", eet.getExpirationTime());
+        Wait.waitForExpiryClockToChange(region);
+      }
+    });
 
-      vm1.invoke(new CacheSerializableRunnable("Update entry") {
-        public void run2() throws CacheException {
-          final Region r = getRootRegion().getSubregion(name);
-          assertNotNull(r);
-          r.put(key, value);
+    vm1.invoke(new CacheSerializableRunnable("Create Region " + name) {
+      public void run2() throws CacheException {
+        AttributesFactory factory = new AttributesFactory(getRegionAttributes());
+        factory.setStatisticsEnabled(true);
+        ExpirationAttributes expire =
+            new ExpirationAttributes(timeout,
+                ExpirationAction.DESTROY);
+        factory.setEntryIdleTimeout(expire);
+        if(getRegionAttributes().getPartitionAttributes() != null){
+          createRegion(name, factory.create());
+        } else {
+          createRegion(name);
         }
-      });
+      }
+    });
 
-      vm0.invoke(new CacheSerializableRunnable("Verify reset") {
-        public void run2() throws CacheException {
-          final LocalRegion region =
-              (LocalRegion) getRootRegion().getSubregion(name);
+    vm1.invoke(new CacheSerializableRunnable("Update entry") {
+      public void run2() throws CacheException {
+        final Region r = getRootRegion().getSubregion(name);
+        assertNotNull(r);
+        r.put(key, value);
+      }
+    });
 
-          // wait for update to reach us from vm1 (needed if no-ack)
-          WaitCriterion waitForUpdate = new WaitCriterion() {
-            public boolean done() {
-              return value.equals(region.get(key));
-            }
-            public String description() {
-              return "never saw update of " + key;
-            }
-          };
-          Wait.waitForCriterion(waitForUpdate, 3000, 10, true);
+    vm0.invoke(new CacheSerializableRunnable("Verify reset") {
+      public void run2() throws CacheException {
+        final LocalRegion region =
+            (LocalRegion) getRootRegion().getSubregion(name);
 
-          EntryExpiryTask eet = region.getEntryExpiryTask(key);
-          long createExpiryTime = (Long) region.get("createExpiryTime");
-          long updateExpiryTime = eet.getExpirationTime();
-          if (updateExpiryTime - createExpiryTime <= 0L) {
-            fail("update did not reset the expiration time. createExpiryTime=" + createExpiryTime + " updateExpiryTime=" + updateExpiryTime);
+        // wait for update to reach us from vm1 (needed if no-ack)
+        WaitCriterion waitForUpdate = new WaitCriterion() {
+          public boolean done() {
+            return value.equals(region.get(key));
           }
-        }
-      });
-    }
+          public String description() {
+            return "never saw update of " + key;
+          }
+        };
+        Wait.waitForCriterion(waitForUpdate, 3000, 10, true);
 
+        EntryExpiryTask eet = region.getEntryExpiryTask(key);
+        long createExpiryTime = (Long) region.get("createExpiryTime");
+        long updateExpiryTime = eet.getExpirationTime();
+        if (updateExpiryTime - createExpiryTime <= 0L) {
+          fail("update did not reset the expiration time. createExpiryTime=" + createExpiryTime + " updateExpiryTime=" + updateExpiryTime);
+        }
+      }
+    });
+  }
 
   private static final int NB1_CHUNK_SIZE = 500 * 1024; // == InitialImageOperation.CHUNK_SIZE_IN_BYTES
   private static final int NB1_NUM_ENTRIES = 1000;
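
In the testEntryTtlLocalDestroy hunk above, the bounded retry around assertCount(...) now catches
java.lang.AssertionError instead of junit.framework.AssertionFailedError; JUnit 4 assertions throw
AssertionError, and the legacy AssertionFailedError extends it, so catching the broader type covers both.
A minimal standalone sketch of that retry idiom, with hypothetical names rather than the dunit helpers:

  // Re-run an assertion that may only become true after a short delay,
  // rethrowing the last AssertionError if it never passes.
  static void assertEventually(Runnable assertion, int retries, long pauseMs)
      throws InterruptedException {
    while (true) {
      try {
        assertion.run();          // e.g. () -> listener.assertCount(1, 0, 0, 0)
        return;                   // assertion passed
      } catch (AssertionError e) {
        if (--retries <= 0) {
          throw e;                // out of retries: surface the last failure
        }
        Thread.sleep(pauseMs);    // brief pause before trying again
      }
    }
  }
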
@@ -4278,15 +4219,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * another cache is doing a getInitialImage.
    */
   @Test
-  public void testNonblockingGetInitialImage() throws Throwable {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testNonblockingGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
     // don't run this test if global scope since it's too difficult to predict
     // how many concurrent operations will occur
-    if (getRegionAttributes().getScope().isGlobal()) {
-      return;
-    }
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
 
     final String name = this.getUniqueName();
     final byte[][] values = new byte[NB1_NUM_ENTRIES][];
@@ -4390,7 +4327,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         } else {
           numProfiles = adv.adviseReplicates().size();              
         }
-        Assert.assertTrue(numProfiles >= expectedProfiles);
+        assertTrue(numProfiles >= expectedProfiles);
         
         // operate on every odd entry with different value, alternating between
         // updates, invalidates, and destroys. These operations are likely
@@ -4480,10 +4417,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       ThreadUtils.join(async, 30 * 1000);
     }
     if (async.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
+      fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
+      fail("asyncGII failed", asyncGII.getException());
     }
 
     // Locally destroy the region in vm0 so we know that they are not found by
@@ -4609,19 +4546,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * another cache is doing a getInitialImage.
    */
   @Test
-  public void testTXNonblockingGetInitialImage() throws Throwable {
-    if (!supportsReplication()) {
-      return;
-    }
-    if (!supportsTransactions()) {
-      return;
-    }
+  public void testTXNonblockingGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
+    assumeTrue(supportsTransactions());
     // don't run this test if global scope since it's too difficult to predict
     // how many concurrent operations will occur
-    if (getRegionAttributes().getScope().isGlobal()
-        || getRegionAttributes().getDataPolicy().withPersistence()) {
-      return;
-    }
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
+    assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
 
     final String name = this.getUniqueName();
     final byte[][] values = new byte[NB1_NUM_ENTRIES][];
@@ -4812,10 +4743,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     }
 
     if (async.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
+      fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
+      fail("asyncGII failed", asyncGII.getException());
     }
 
     // Locally destroy the region in vm0 so we know that they are not found by
@@ -4926,11 +4857,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
   @Ignore("Disabled for 51542")
   @Test
-  public void testNBRegionInvalidationDuringGetInitialImage() throws Throwable {
+  public void testNBRegionInvalidationDuringGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
+
     DistributedTestCase.disconnectAllFromDS();
-    if (!supportsReplication()) {
-      return;
-    }
+
     // don't run this for noAck, too many race conditions
     if (getRegionAttributes().getScope().isDistributedNoAck()) return;
 
@@ -5251,10 +5182,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
   }
 
   @Test
-  public void testNBRegionDestructionDuringGetInitialImage() throws Throwable {
-    if (!supportsReplication()) {
-      return;
-    }
+  public void testNBRegionDestructionDuringGetInitialImage() throws Exception {
+    assumeTrue(supportsReplication());
+
     final String name = this.getUniqueName();
     final byte[][] values = new byte[NB1_NUM_ENTRIES][];
 
@@ -5431,7 +5361,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       // wait for nonblocking operations to complete
       ThreadUtils.join(async, 30 * 1000);
       if (async.exceptionOccurred()) {
-        com.gemstone.gemfire.test.dunit.Assert.fail("async invocation failed", async.getException());
+        fail("async invocation failed", async.getException());
       }
 
       vm2.invoke(new SerializableRunnable("Set fast image processing") {
@@ -5451,10 +5381,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       ThreadUtils.join(async, 30 * 1000);
     }
     if (async.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
+      fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
+      fail("asyncGII failed", asyncGII.getException());
     }
     } finally { 
       ex.remove();
@@ -5468,7 +5398,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * @since 3.5
    */
   @Test
-  public void testNoDataSerializer() {
+  public void testNoDataSerializer() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -5480,7 +5410,6 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -5621,7 +5550,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * @since 3.5
    */
   @Test
-  public void testNoInstantiator() {
+  public void testNoInstantiator() throws Exception {
     assertTrue(getRegionAttributes().getScope().isDistributed());
 
     final String name = this.getUniqueName();
@@ -5633,7 +5562,6 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -6051,16 +5979,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     cdcl.setEntryEvent(null);
   }
 
-  ////////////////////////// TX Tests //////////////////////////////
   /**
    * Tests that an entry update is propagated to other caches that
    * have that same entry defined.
    */
   @Test
   public void testTXSimpleOps() throws Exception {
-    if (!supportsTransactions()) {
-      return;
-    }
+    assumeTrue(supportsTransactions());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
     CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
 
@@ -6078,6 +6004,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       rgn.localDestroyRegion();
       return;
     }
+
     final String rgnName = getUniqueName();
 
     SerializableRunnable create = new SerializableRunnable("testTXSimpleOps: Create Region") {
@@ -6098,7 +6025,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXSimpleOps: Created region");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6115,7 +6042,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXSimpleOps: Created Key");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6352,6 +6279,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * Tests that the push of a loaded value does not cause a conflict
    * on the side receiving the update
    */
+  @Ignore("TODO: this test always hits early out")
   @Test
   public void testTXUpdateLoadNoConflict() throws Exception {
     /*
@@ -6361,17 +6289,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     if(true) {
       return;
     }
-    
-    if (!supportsTransactions()) {
-      return;
-    }
+
+    assumeTrue(supportsTransactions());
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
+    assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
     CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
 
-    if (getRegionAttributes().getScope().isGlobal()
-        || getRegionAttributes().getDataPolicy().withPersistence()) {
-      return;
-    }
     final String rgnName = getUniqueName();
 
     SerializableRunnable create = new SerializableRunnable("testTXUpdateLoadNoConflict: Create Region & Load value") {
@@ -6399,7 +6324,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           flushIfNecessary(rgn);
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6593,21 +6518,18 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       getSystem().getLogWriter().fine("testTXUpdateLoadNoConflict: Caused exception in createRegion");
       throw e;
     }
-
   }
 
   @Test
   public void testTXMultiRegion() throws Exception {
-    if (!supportsTransactions()) {
-      return;
-    }
+    assumeTrue(supportsTransactions());
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
+    assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
+
     CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
 
-    if (getRegionAttributes().getScope().isGlobal()
-        || getRegionAttributes().getDataPolicy().withPersistence()) {
-      return;
-    }
     final String rgnName1 = getUniqueName() + "MR1";
     final String rgnName2 = getUniqueName() + "MR2";
     final String rgnName3 = getUniqueName() + "MR3";
@@ -6622,7 +6544,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created region1");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6634,7 +6556,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created key");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6649,7 +6571,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created region2");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6662,7 +6584,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Key");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6677,7 +6599,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Region");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -6690,7 +6612,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Key");
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -7335,21 +7257,17 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       getSystem().getLogWriter().fine("testTXMultiRegion: Caused exception in createRegion");
       throw e;
     }
-
   }
 
   @Test
   public void testTXRmtMirror() throws Exception {
-    if (!supportsTransactions()) {
-      return;
-    }
+    assumeTrue(supportsTransactions());
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
+    assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
     CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
 
-    if (getRegionAttributes().getScope().isGlobal()
-        || getRegionAttributes().getDataPolicy().withPersistence()) {
-      return;
-    }
     final String rgnName = getUniqueName();
 
     SerializableRunnable createMirror = new SerializableRunnable("textTXRmtMirror: Create Mirrored Region") {
@@ -7363,7 +7281,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           createRegion(rgnName, rgnAtts.create());
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -7379,7 +7297,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           createRegion(rgnName, rgnAtts.create());
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -7470,17 +7388,15 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       getSystem().getLogWriter().fine("textTXRmtMirror: Caused exception in createRegion");
       throw e;
     }
-
   }
 
   @Ignore("TODO")
   @Test
   public void testTXAlgebra() throws Exception {
+    assumeFalse(getRegionAttributes().getScope().isGlobal());
+    assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
+
     assertTrue(getRegionAttributes().getScope().isDistributed());
-    if (getRegionAttributes().getScope().isGlobal()
-        || getRegionAttributes().getDataPolicy().withPersistence()) {
-      return;
-    }
 
     CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
     MyTransactionListener localTl = new MyTransactionListener();
@@ -7505,7 +7421,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         }
         catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
+          fail("While creating region", e);
         }
       }
     };
@@ -7806,7 +7722,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               try {
                 rgn1.create("key", null);
               } catch (CacheException e) {
-                com.gemstone.gemfire.test.dunit.Assert.fail("While creating key", e);
+                fail("While creating key", e);
               }
             }
           });
@@ -7991,11 +7907,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               try {
                 bridge.start();
               } catch (IOException ex) {
-                com.gemstone.gemfire.test.dunit.Assert.fail("While creating bridge", ex);
+                fail("While creating bridge", ex);
               }
             }
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8003,8 +7919,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     SerializableRunnable asserter = new SerializableRunnable("ensure tombstone has been received") {
       public void run() {
         RegionEntry entry = CCRegion.getRegionEntry("object2");
-        Assert.assertTrue(entry != null);
-        Assert.assertTrue(entry.isTombstone());
+        assertTrue(entry != null);
+        assertTrue(entry.isTombstone());
       }
     };
     
@@ -8078,7 +7994,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8096,7 +8012,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+          fail("while performing concurrent operations", e);
         }
 //        } catch (InterruptedException e) {
 //          fail("someone interrupted my sleep");
@@ -8157,7 +8073,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             long events = CCRegion.getCachePerfStats().getDeltaFailedUpdates();
             assertTrue("expected some failed deltas", events>0);
           } catch (CacheException e) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+            fail("while performing concurrent operations", e);
           }
         }
       };
@@ -8266,7 +8182,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           boolean includeClear = true;
           doOpsLoop(msToRun, includeClear);
         } catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+          fail("while performing concurrent operations", e);
         }
       }
     };
@@ -8281,7 +8197,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           f.setDiskSynchronous(syncDiskWrite);
           CCRegion = (LocalRegion)f.create(name);
         } catch (CacheException ex) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+          fail("While creating region", ex);
         }
       }
     };
@@ -8380,7 +8296,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8396,7 +8312,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         try {
           doOpsLoop(5000, true);
         } catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+          fail("while performing concurrent operations", e);
         }
 //        } catch (InterruptedException e) {
 //          fail("someone interrupted my sleep");
@@ -8493,7 +8409,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8516,7 +8432,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException e) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("while performing destroy operations", e);
+            fail("while performing destroy operations", e);
           }
 //          OSProcess.printStacks(0, getLogWriter(), false);
         }
@@ -8543,7 +8459,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           };
           try {
             Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT+10000, 1000, true);
-          } catch (AssertionFailedError e) {
+          } catch (AssertionError e) {
             CCRegion.dumpBackingMap();
             com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
             throw e;
@@ -8593,12 +8509,12 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             if (CCRegion.getScope().isDistributedNoAck()) {
               sendSerialMessageToAll(); // flush the ops
             }
-          } catch (AssertionFailedError e) {
+          } catch (AssertionError e) {
             CCRegion.dumpBackingMap();
             com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
             throw e;
           } catch (CacheException e) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("while performing create/destroy operations", e);
+            fail("while performing create/destroy operations", e);
           }
         }
       });
@@ -8631,7 +8547,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException e) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("while performing create operations", e);
+            fail("while performing create operations", e);
           }
         }
       });
@@ -8714,7 +8630,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8734,7 +8650,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+          fail("while performing concurrent operations", e);
         }
       }
     };
@@ -8895,7 +8811,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+            fail("While creating region", ex);
           }
         }
       };
@@ -8915,7 +8831,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
+          fail("while performing concurrent operations", e);
         }
       }
     };
@@ -9033,7 +8949,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
           CCRegion = (LocalRegion)f.create(regionName);
         } catch (CacheException ex) {
-          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
+          fail("While creating region", ex);
         }
       }
     };
@@ -9085,7 +9001,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
   
   /**
    * The number of milliseconds to try repeating validation code in the
-   * event that AssertionFailedError is thrown.  For ACK scopes, no
+   * event that AssertionError is thrown.  For ACK scopes, no
    * repeat should be necessary.
    */
   protected long getRepeatTimeoutMs() {

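The hunks above replace silent early returns with JUnit 4 assumptions. A minimal standalone sketch of that pattern follows; it is not part of the commit, and the supportsTransactions()/isGlobalScope()/isPersistent() checks are placeholders standing in for the real test-case accessors.

import static org.junit.Assume.assumeFalse;
import static org.junit.Assume.assumeTrue;

import org.junit.Test;

public class AssumptionSkipExampleTest {

  // Placeholder configuration checks standing in for the real accessors.
  private boolean supportsTransactions() { return true; }
  private boolean isGlobalScope() { return false; }
  private boolean isPersistent() { return false; }

  @Test
  public void testTransactionalBehavior() {
    // A failed assumption reports the test as skipped instead of silently passing.
    assumeTrue(supportsTransactions());
    assumeFalse(isGlobalScope());
    assumeFalse(isPersistent());

    // ... test body runs only for configurations that satisfy the assumptions ...
  }
}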
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
index 2da1ed7..1eeb882 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
@@ -16,17 +16,13 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.util.Properties;
 
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.DataPolicy;
@@ -37,6 +33,8 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * Tests the performance of Regions when Roles are assigned.
@@ -46,10 +44,6 @@ import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 @Category(DistributedTest.class)
 public class RolePerformanceDUnitTest extends JUnit4CacheTestCase {
 
-  public RolePerformanceDUnitTest() {
-    super();
-  }
-
   /**
    * Compares times required for series of puts with Roles assigned to
    * series of puts with no Roles assigned. Scope is D_ACK.
@@ -69,7 +63,7 @@ public class RolePerformanceDUnitTest extends JUnit4CacheTestCase {
         break;
       }
       // only catch assertion failures...
-      catch (junit.framework.AssertionFailedError e) {
+      catch (AssertionError e) {
         if (i == maxAttempts) {
           throw e;
         }

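RolePerformanceDUnitTest retries its timing comparison when an assertion fails and only rethrows on the final attempt. Below is a minimal sketch of that retry idiom, not taken from the commit (the attempt count and the validation are illustrative); it relies on the fact that in JUnit 4 junit.framework.AssertionFailedError extends java.lang.AssertionError, so catching AssertionError covers failures raised through either API.

import static org.junit.Assert.assertTrue;

public class RetryOnAssertionErrorExample {

  private static int calls = 0;

  /** Runs the validation up to maxAttempts times, rethrowing the last failure. */
  static void retry(int maxAttempts, Runnable validation) {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        validation.run();
        return; // validation passed
      } catch (AssertionError e) {
        if (attempt == maxAttempts) {
          throw e; // out of attempts: surface the last failure
        }
        // otherwise fall through and try again
      }
    }
  }

  public static void main(String[] args) {
    retry(10, () -> {
      calls++;
      assertTrue("needs a few attempts", calls >= 3); // fails twice, passes on the third try
    });
  }
}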
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
index b7a3ad7..73fe24c 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
@@ -26,18 +26,10 @@
  * @see MultiVMRegionTestCase
  *
  */
-
 package com.gemstone.gemfire.cache30;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.HashSet;
@@ -45,9 +37,8 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 
-import junit.framework.AssertionFailedError;
-
 import org.junit.Ignore;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.SystemFailure;
@@ -82,28 +73,25 @@ import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.TXState;
 import com.gemstone.gemfire.internal.cache.TXStateInterface;
 import com.gemstone.gemfire.internal.cache.TXStateProxyImpl;
-//import com.gemstone.gemfire.internal.cache.locks.TXLockId;
 import com.gemstone.gemfire.internal.cache.locks.TXLockBatch;
 import com.gemstone.gemfire.internal.cache.locks.TXLockService;
 import com.gemstone.gemfire.internal.cache.locks.TXLockServiceImpl;
 import com.gemstone.gemfire.test.dunit.Assert;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
-import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 import com.gemstone.gemfire.test.junit.categories.FlakyTest;
 
 @Category(DistributedTest.class)
 public class TXDistributedDUnitTest extends JUnit4CacheTestCase {
-  public TXDistributedDUnitTest() {
-    super();
-  }
 
   protected RegionAttributes getRegionAttributes() {
     return this.getRegionAttributes(Scope.DISTRIBUTED_ACK);
@@ -760,7 +748,7 @@ public class TXDistributedDUnitTest extends JUnit4CacheTestCase {
               assertNotNull(re);
               assertEquals("val1_3", re.getValue());
               break;
-            } catch (AssertionFailedError e) {
+            } catch (AssertionError e) {
               if (giveUp > System.currentTimeMillis()) {
                 throw e;
               }
@@ -1329,8 +1317,9 @@ public class TXDistributedDUnitTest extends JUnit4CacheTestCase {
     }
   }
   
-  @Ignore("Disabled for 51260")
-  public void DISABLED_testRemoteCommitFailure() {
+  @Ignore("TODO: Disabled for #51260")
+  @Test
+  public void testRemoteCommitFailure() throws Exception {
     try {
     disconnectAllFromDS();
     final String rgnName1= getUniqueName()  + "_1";

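The final hunk above turns a test that had been disabled by renaming (so JUnit never saw it) into an @Ignore-annotated @Test, which keeps it visible in reports as skipped. A rough sketch of the same idea; the method name and ignore reason below are placeholders, not taken from the commit.

import org.junit.Ignore;
import org.junit.Test;

public class IgnoredTestExample {

  // Renaming a test method hides it from JUnit entirely; @Ignore keeps it
  // in the suite and reports it as skipped, along with the reason.
  @Ignore("TODO: disabled pending a fix (placeholder reason)")
  @Test
  public void testRemoteCommitBehavior() throws Exception {
    // ... body never runs while @Ignore is present ...
  }
}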


[5/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/distributed/DistributedLockServiceDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/distributed/DistributedLockServiceDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/distributed/DistributedLockServiceDUnitTest.java
index c53360d..ebd0b95 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/distributed/DistributedLockServiceDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/distributed/DistributedLockServiceDUnitTest.java
@@ -16,15 +16,8 @@
  */
 package com.gemstone.gemfire.distributed;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -34,8 +27,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import junit.framework.Assert;
-import junit.framework.AssertionFailedError;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
@@ -52,7 +45,6 @@ import com.gemstone.gemfire.distributed.internal.locks.RemoteThread;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.internal.util.StopWatch;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
@@ -63,32 +55,24 @@ import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
-import org.junit.experimental.categories.Category;
-
 /**
  * This class tests distributed ownership via the DistributedLockService api.
  */
 @Category(DistributedTest.class)
 public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
-  
-	protected static DistributedSystem dlstSystem;
+
+  private static DistributedSystem dlstSystem;
   private static DistributedLockBlackboard blackboard;
-  protected static Object monitor = new Object();
+  private static Object monitor = new Object();
 
   private int hits = 0;
   private int completes = 0;
   private boolean done;
   private boolean got;
 
-
-  public DistributedLockServiceDUnitTest() {
-    super();
-  }
-  
-  /////////// Test lifecycle //////////
-
   /**
    * Returns a previously created (or new, if this is the first
    * time this method is called in this VM) distributed system
@@ -141,8 +125,6 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     DLockService.dumpAllServices();
   }
 
-  ///////// Remote setup/teardown support
-
   /**
    * Connects a DistributedSystem, saves it in static variable "system"
    */
@@ -150,8 +132,6 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     dlstSystem = (new DistributedLockServiceDUnitTest()).getSystem();
   }
 
-  /////////  Public test methods
-  
   @Test
   public void testBasic() {
     String serviceName = getUniqueName();
@@ -286,6 +266,7 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
   protected static volatile boolean stop_testFairness;
   protected static volatile boolean[] done_testFairness = new boolean[16];
   static { Arrays.fill(done_testFairness, true); }
+  
   @Test
   public void testFairness() throws Exception {
     final String serviceName = "testFairness_" + getUniqueName();
@@ -587,7 +568,7 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
           logInfo("[testBasicGrantorRecovery] succeeded attempt " + attempt);
           break; // success
         }
-        catch (AssertionFailedError e) {
+        catch (AssertionError e) {
           logInfo("[testBasicGrantorRecovery] failed attempt " + attempt);
           if (attempt == attempts-1) throw e;
         }
@@ -1004,13 +985,13 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     DistributedLockService service = 
         DistributedLockService.getServiceNamed(serviceName);
 
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
     
     service.lock(objName, -1, -1);
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.isHeldByCurrentThread(objName));
     
     service.unlock(objName);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
   }
   
   @Test
@@ -1049,22 +1030,22 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     DistributedLockService service = 
         DistributedLockService.getServiceNamed(serviceName);
 
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
     
     // initial lock...
-    Assert.assertTrue(service.lock(objName, -1, -1));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, -1));
+    assertTrue(service.isHeldByCurrentThread(objName));
 
     // recursion +1...
-    Assert.assertTrue(service.lock(objName, -1, -1));
+    assertTrue(service.lock(objName, -1, -1));
 
     // recursion -1...    
     service.unlock(objName);
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.isHeldByCurrentThread(objName));
 
     // and unlock...    
     service.unlock(objName);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
   }
   
   @Test
@@ -1079,19 +1060,19 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     DistributedLockService service = 
         DistributedLockService.getServiceNamed(serviceName);
 
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
     
     // initial lock...
-    Assert.assertTrue(service.lock(objName, -1, leaseMs));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, leaseMs));
+    assertTrue(service.isHeldByCurrentThread(objName));
 
     // recursion +1...
-    Assert.assertTrue(service.lock(objName, -1, leaseMs));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, leaseMs));
+    assertTrue(service.isHeldByCurrentThread(objName));
     
     // expire...
     sleep(waitBeforeLockingMs);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
 
     // should fail...
     try {
@@ -1101,31 +1082,31 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     }
     
     // relock it...
-    Assert.assertTrue(service.lock(objName, -1, leaseMs));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, leaseMs));
+    assertTrue(service.isHeldByCurrentThread(objName));
 
     // and unlock to verify no recursion...    
     service.unlock(objName);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName)); // throws failure!!
+    assertTrue(!service.isHeldByCurrentThread(objName)); // throws failure!!
     
     // go thru again in different order...
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
     
     // initial lock...
-    Assert.assertTrue(service.lock(objName, -1, leaseMs));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, leaseMs));
+    assertTrue(service.isHeldByCurrentThread(objName));
 
     // expire...
     sleep(waitBeforeLockingMs);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
 
     // relock it...
-    Assert.assertTrue(service.lock(objName, -1, leaseMs));
-    Assert.assertTrue(service.isHeldByCurrentThread(objName));
+    assertTrue(service.lock(objName, -1, leaseMs));
+    assertTrue(service.isHeldByCurrentThread(objName));
     
     // and unlock to verify no recursion...    
     service.unlock(objName);
-    Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+    assertTrue(!service.isHeldByCurrentThread(objName));
   }
   
   @Test
@@ -1162,7 +1143,7 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
     sleep(waitBeforeLockingMs);
 
     if (waitBeforeLockingMs > leaseMs) {
-      Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+      assertTrue(!service.isHeldByCurrentThread(objName));
     }
     
     LogWriterUtils.getLogWriter().fine("[testLeaseExpires] acquire lock that expired");
@@ -1172,7 +1153,7 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
       public void run() {
         resultHolder[0] = service.lock(objName, -1, -1);
         service.unlock(objName);
-        Assert.assertTrue(!service.isHeldByCurrentThread(objName));
+        assertTrue(!service.isHeldByCurrentThread(objName));
       }
     });
     thread.start();

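This diff swaps junit.framework.Assert for statically imported org.junit.Assert methods. A minimal sketch of the resulting import style (the assertions themselves are illustrative, not lifted from the test):

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class StaticAssertImportExample {

  @Test
  public void testLockStateFlags() {
    boolean held = false;
    assertFalse(held);   // unqualified call via static import

    held = true;
    assertTrue(held);    // no junit.framework.Assert qualifier needed
  }
}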
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/SSLConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/SSLConfigJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/SSLConfigJUnitTest.java
index 543574a..d862392 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/SSLConfigJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/SSLConfigJUnitTest.java
@@ -16,15 +16,12 @@
  */
 package com.gemstone.gemfire.internal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.*;
 
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
-import junit.framework.AssertionFailedError;
-
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -34,7 +31,6 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Test that DistributionConfigImpl handles SSL options correctly.
- * 
  */
 @Category(IntegrationTest.class)
 public class SSLConfigJUnitTest {
@@ -49,7 +45,6 @@ public class SSLConfigJUnitTest {
   private static final Properties GATEWAY_SSL_PROPS_MAP = new Properties();
   private static final Properties GATEWAY_PROPS_SUBSET_MAP = new Properties();
   
-
   static {
     
     SSL_PROPS_MAP.put("javax.net.ssl.keyStoreType", "jks");
@@ -104,8 +99,6 @@ public class SSLConfigJUnitTest {
 
   }
   
-  //----- test methods ------
-
   @Test
   public void testMCastPortWithSSL() throws Exception {
     Properties props = new Properties( );
@@ -143,7 +136,7 @@ public class SSLConfigJUnitTest {
   }
   
   @Test
-  public void testConfigCopyWithSSL( ) throws Exception {
+  public void testConfigCopyWithSSL() throws Exception {
     boolean sslenabled = false;
     String sslprotocols = "any";
     String sslciphers = "any";
@@ -332,7 +325,6 @@ public class SSLConfigJUnitTest {
     isEqual( config.getGatewaySSLRequireAuthentication(), gatewaySslRequireAuth );
   }
   
-
   @Test
   public void testManagerConfig() throws Exception {
     boolean sslenabled = false;
@@ -383,7 +375,6 @@ public class SSLConfigJUnitTest {
     isEqual( config.getJmxManagerSSLRequireAuthentication(), jmxManagerSslRequireAuth );
   }
   
-  
   @Test
   public void testCacheServerConfig() throws Exception {
     boolean sslenabled = false;
@@ -456,7 +447,6 @@ public class SSLConfigJUnitTest {
   
   @Test
   public void testCustomizedClusterSslConfig() throws Exception {
-    
     boolean sslenabled = true;
     String  sslprotocols = "SSLv1";
     String  sslciphers = "RSA_WITH_NOTHING";
@@ -622,7 +612,6 @@ public class SSLConfigJUnitTest {
     
     clusterSSLProperties = config.getClusterSSLProperties();
     isEqual( SSL_PROPS_MAP, clusterSSLProperties );
-    
   }
   
   @Test
@@ -691,7 +680,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.SERVER_SSL_CIPHERS_NAME, cacheServerSslciphers);
     gemFireProps.put(DistributionConfig.SERVER_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(cacheServerSslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforCS(false));
+    gemFireProps.putAll(getGfSecurityPropertiesForCS(false));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -734,7 +723,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_CIPHERS_NAME, gatewaySslciphers);
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(gatewaySslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforGateway(false));
+    gemFireProps.putAll(getGfSecurityPropertiesForGateway(false));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -752,7 +741,6 @@ public class SSLConfigJUnitTest {
     isEqual( GATEWAY_SSL_PROPS_MAP.get("gateway-ssl-keystore-password"), config.getGatewaySSLKeyStorePassword());
     isEqual( GATEWAY_SSL_PROPS_MAP.get("gateway-ssl-truststore"), config.getGatewaySSLTrustStore());
     isEqual( GATEWAY_SSL_PROPS_MAP.get("gateway-ssl-truststore-password"),config.getGatewaySSLTrustStorePassword());
-    
   }
   
   @Test
@@ -804,7 +792,6 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getJmxManagerSSLTrustStorePassword());
   }
   
-  
   @Test
   public void testPartialCustomizedCacheServerSslConfig() throws Exception {
     boolean sslenabled = false;
@@ -828,7 +815,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.SERVER_SSL_CIPHERS_NAME, cacheServerSslciphers);
     gemFireProps.put(DistributionConfig.SERVER_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(cacheServerSslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforCS(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForCS(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -877,7 +864,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_CIPHERS_NAME, gatewaySslciphers);
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(gatewaySslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforGateway(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForGateway(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -901,11 +888,10 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-keystore-password"), config.getGatewaySSLKeyStorePassword());
     isEqual( GATEWAY_PROPS_SUBSET_MAP.get("gateway-ssl-truststore"), config.getGatewaySSLTrustStore());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getGatewaySSLTrustStorePassword());
-
   }
   
   @Test
-  public void testP2pSSLPropsOverriden_ServerPropsNotOverriden(){
+  public void testP2pSSLPropsOverriden_ServerPropsNotOverriden() throws Exception {
     boolean sslenabled = true;
     String  sslprotocols = "overrriden";
     String  sslciphers = "overrriden";
@@ -923,7 +909,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.CLUSTER_SSL_CIPHERS_NAME, sslciphers);
     gemFireProps.put(DistributionConfig.CLUSTER_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(requireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforCS(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForCS(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -954,11 +940,10 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-keystore-password"), config.getServerSSLKeyStorePassword());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore"), config.getServerSSLTrustStore());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getServerSSLTrustStorePassword());
-    
   }
   
   @Test
-  public void testP2pSSLPropsOverriden_ServerPropsOverriden(){
+  public void testP2pSSLPropsOverriden_ServerPropsOverriden() throws Exception {
     boolean sslenabled = true;
     String  sslprotocols = "overrriden";
     String  sslciphers = "overrriden";
@@ -981,7 +966,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.SERVER_SSL_CIPHERS_NAME, cacheServerSslciphers);
     gemFireProps.put(DistributionConfig.SERVER_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(cacheServerSslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforCS(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForCS(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -1015,7 +1000,7 @@ public class SSLConfigJUnitTest {
   }
   
   @Test
-  public void testClusterSSLPropsOverriden_GatewayPropsNotOverriden(){
+  public void testClusterSSLPropsOverriden_GatewayPropsNotOverriden() throws Exception {
     boolean sslenabled = true;
     String  sslprotocols = "overrriden";
     String  sslciphers = "overrriden";
@@ -1033,7 +1018,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.CLUSTER_SSL_CIPHERS_NAME, sslciphers);
     gemFireProps.put(DistributionConfig.CLUSTER_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(requireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforGateway(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForGateway(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -1064,11 +1049,10 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-keystore-password"), config.getGatewaySSLKeyStorePassword());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore"), config.getGatewaySSLTrustStore());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getGatewaySSLTrustStorePassword());
-    
   }
   
   @Test
-  public void testP2pSSLPropsOverriden_GatewayPropsOverriden(){
+  public void testP2pSSLPropsOverriden_GatewayPropsOverridden() throws Exception {
     boolean sslenabled = true;
     String  sslprotocols = "overrriden";
     String  sslciphers = "overrriden";
@@ -1091,7 +1075,7 @@ public class SSLConfigJUnitTest {
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_CIPHERS_NAME, gatewayServerSslciphers);
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_REQUIRE_AUTHENTICATION_NAME, String.valueOf(gatewayServerSslRequireAuth));
 
-    gemFireProps.putAll(getGfSecurityPropertiesforGateway(true));
+    gemFireProps.putAll(getGfSecurityPropertiesForGateway(true));
 
     DistributionConfigImpl config = new DistributionConfigImpl( gemFireProps );
     isEqual( config.getClusterSSLEnabled(), sslenabled );
@@ -1117,11 +1101,10 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-keystore-password"), config.getGatewaySSLKeyStorePassword());
     isEqual( GATEWAY_PROPS_SUBSET_MAP.get("gateway-ssl-truststore"), config.getGatewaySSLTrustStore());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getGatewaySSLTrustStorePassword());
-    
   }
   
   @Test
-  public void testP2pSSLPropsOverriden_JMXPropsNotOverriden(){
+  public void testP2pSSLPropsOverriden_JMXPropsNotOverriden() throws Exception {
     boolean sslenabled = true;
     String  sslprotocols = "overrriden";
     String  sslciphers = "overrriden";
@@ -1170,7 +1153,6 @@ public class SSLConfigJUnitTest {
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-keystore-password"), config.getJmxManagerSSLKeyStorePassword());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore"), config.getJmxManagerSSLTrustStore());
     isEqual( CLUSTER_SSL_PROPS_MAP.get("cluster-ssl-truststore-password"),config.getJmxManagerSSLTrustStorePassword()); 
-    
   }
   
   private static Properties getGfSecurityPropertiesSSL() {
@@ -1224,7 +1206,7 @@ public class SSLConfigJUnitTest {
     return gfSecurityProps;
   }
   
-  private static Properties getGfSecurityPropertiesforCS(boolean partialCSSslConfigOverride) {
+  private static Properties getGfSecurityPropertiesForCS(boolean partialCSSslConfigOverride) {
     Properties gfSecurityProps = new Properties();
 
     Set<Entry<Object, Object>> entrySet = CLUSTER_SSL_PROPS_MAP.entrySet();
@@ -1245,7 +1227,7 @@ public class SSLConfigJUnitTest {
     return gfSecurityProps;
   }
 
-  private static Properties getGfSecurityPropertiesforGateway(boolean partialGatewaySslConfigOverride) {
+  private static Properties getGfSecurityPropertiesForGateway(boolean partialGatewaySslConfigOverride) {
     Properties gfSecurityProps = new Properties();
 
     Set<Entry<Object, Object>> entrySet = CLUSTER_SSL_PROPS_MAP.entrySet();
@@ -1265,11 +1247,11 @@ public class SSLConfigJUnitTest {
     return gfSecurityProps;
   }
   
-  public void isEqual( boolean a, boolean e ) throws AssertionFailedError {
-    assertEquals( a, e );
+  private void isEqual(boolean a, boolean e) {
+    assertEquals(a, e);

   }
   
-  public void isEqual( Object a, Object e ) throws AssertionFailedError {
+  private void isEqual(Object a, Object e) {
     assertEquals( a, e );
   } 
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug34179TooManyFilesOpenJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug34179TooManyFilesOpenJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug34179TooManyFilesOpenJUnitTest.java
index 4eea76d..cbc5e4c 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug34179TooManyFilesOpenJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug34179TooManyFilesOpenJUnitTest.java
@@ -14,25 +14,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.fail;
-
 import java.io.File;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
-
 /**
  * Disk region perf test for Persist only with Async writes and  Buffer.
  * Set Rolling oplog to true and setMaxOplogSize to 10240
@@ -43,25 +36,25 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * The disk properties will ensure that very many oplog files are created.
  * 
  * This test is currently not being executed and is marked with an underscore
- *  
  */
 @Category(IntegrationTest.class)
-public class Bug34179TooManyFilesOpenJUnitTest extends DiskRegionTestingBase
-{
+public class Bug34179TooManyFilesOpenJUnitTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private static int ENTRY_SIZE = 1024;
+
+  private static int OP_COUNT = 100000;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private LogWriter log = null;
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
  
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     File file1 = new File("testingDirectory/" + getName()+ "1");
     file1.mkdir();
     file1.deleteOnExit();
-   dirs = new File[1];
-   dirs[0] = file1;
+    dirs = new File[1];
+    dirs[0] = file1;
     diskProps.setDiskDirs(dirs);
      
     diskProps.setPersistBackup(true);
@@ -75,43 +68,24 @@ public class Bug34179TooManyFilesOpenJUnitTest extends DiskRegionTestingBase
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-    
-  }
-
-  
-  private static int ENTRY_SIZE = 1024;
-
-  private static int OP_COUNT = 100000;
-  
   /**
    * currently not being executed for congo but after transition to JDK 1.5, this test should be executed.
-   *
    */
-  public void _testPopulate1kbwrites()
-  {
-    try {
-      final byte[] value = new byte[ENTRY_SIZE];
-      Arrays.fill(value, (byte)77);
-      for (int i = 0; i < OP_COUNT; i++) {
-        region.put(new Integer(i), value);
-      }
-      closeDown(); // closes disk file which will flush all buffers
-    }
-    catch (Exception ex) {
-      fail("IOException occured due to " + ex);
+  @Ignore("TODO: test is disabled")
+  @Test
+  public void testPopulate1kbwrites() {
+    final byte[] value = new byte[ENTRY_SIZE];
+    Arrays.fill(value, (byte)77);
+    for (int i = 0; i < OP_COUNT; i++) {
+      region.put(new Integer(i), value);
     }
-
+    closeDown(); // closes disk file which will flush all buffers
   }
+
   /**
    * cleans all the directory of all the files present in them
-   *
    */
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     for (int i = 0; i < dirs.length; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
@@ -120,11 +94,4 @@ public class Bug34179TooManyFilesOpenJUnitTest extends DiskRegionTestingBase
     }
   }
   
-  @Test
-  public void testDoNothing(){
-    //dummy method to ensure at least one test is present in this file if the other tests are commented
-  }
-  
-  
 }// end of Bug34179TooManyFilesOpenJUnitTest
-

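The rewritten testPopulate1kbwrites drops the try/catch block that converted any exception into a fail(...) message, which loses the original stack trace. A rough sketch of that before/after shape, assuming a placeholder operation that can throw a checked exception:

import java.io.IOException;

import org.junit.Test;

public class LetExceptionsPropagateExampleTest {

  // Placeholder for an operation that can throw a checked exception.
  private void writeEntries() throws IOException {
    // ... illustrative body ...
  }

  // Before: try { writeEntries(); } catch (Exception ex) { fail("IOException occured due to " + ex); }
  // After: declare throws and let JUnit report the real exception and stack trace.
  @Test
  public void testPopulateEntries() throws Exception {
    writeEntries();
  }
}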
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ComplexDiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ComplexDiskRegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ComplexDiskRegionJUnitTest.java
index bc33f3a..1506f96 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ComplexDiskRegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ComplexDiskRegionJUnitTest.java
@@ -16,43 +16,31 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import org.junit.After;
-import org.junit.Before;
+import static org.junit.Assert.*;
+
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.internal.cache.DirectoryHolder;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Unit testing for ComplexDiskRegion API's
- * 
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class ComplexDiskRegionJUnitTest extends DiskRegionTestingBase
-{
+public class ComplexDiskRegionJUnitTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-
-  
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
   @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
   }
 
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
   }
 
@@ -63,8 +51,7 @@ public class ComplexDiskRegionJUnitTest extends DiskRegionTestingBase
    * The test will test that an oplog is correctly being added to be rolled
    */
   @Test
-  public void testAddToBeCompacted()
-  {
+  public void testAddToBeCompacted() {
     deleteFiles();
     diskProps.setRolling(false);
     diskProps.setAllowForceCompaction(true);
@@ -113,11 +100,9 @@ public class ComplexDiskRegionJUnitTest extends DiskRegionTestingBase
     oplog3.destroy();
     closeDown();
     deleteFiles();
-
   }
 
   /**
-   *  
    * Test method for
    * 'com.gemstone.gemfire.internal.cache.ComplexDiskRegion.removeFirstOplog(Oplog)'
    * 
@@ -125,8 +110,7 @@ public class ComplexDiskRegionJUnitTest extends DiskRegionTestingBase
    * the firs to be rolled).
    */
   @Test
-  public void testRemoveFirstOplog()
-  {
+  public void testRemoveFirstOplog() {
     deleteFiles();
     diskProps.setRolling(false);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);

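Here the @Before/@After overrides that had to remember to call super are replaced with postSetUp/postTearDown hooks supplied by the test base class. The sketch below illustrates that template-method shape under an assumed base class; HypotheticalTestingBase is invented for the example and is not Geode's DiskRegionTestingBase.

import static org.junit.Assert.assertEquals;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Invented base class: it owns the JUnit lifecycle and exposes hooks, so
// subclasses cannot forget to call super.setUp()/super.tearDown().
abstract class HypotheticalTestingBase {

  @Before
  public final void setUp() throws Exception {
    // ... base-class setup ...
    postSetUp();
  }

  @After
  public final void tearDown() throws Exception {
    // ... base-class teardown ...
    postTearDown();
  }

  protected void postSetUp() throws Exception {}

  protected void postTearDown() throws Exception {}
}

public class TemplateSetUpExampleTest extends HypotheticalTestingBase {

  private String resource;

  @Override
  protected void postSetUp() throws Exception {
    resource = "configured"; // subclass-specific setup, no super call required
  }

  @Override
  protected void postTearDown() throws Exception {
    resource = null;
  }

  @Test
  public void testResourceIsConfigured() {
    assertEquals("configured", resource);
  }
}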
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentFlushingAndRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentFlushingAndRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentFlushingAndRegionOperationsJUnitTest.java
index 0cab6f9..aeef265 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentFlushingAndRegionOperationsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentFlushingAndRegionOperationsJUnitTest.java
@@ -16,12 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-import junit.framework.Assert;
+import static org.junit.Assert.*;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -36,25 +32,9 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * A region operation is done on the same key that is about to be rolled or has
  * just been rolled and the region operation is verified to have been correctly
  * executed.
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
-    DiskRegionTestingBase
-{
-
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+public class ConcurrentFlushingAndRegionOperationsJUnitTest extends DiskRegionTestingBase {
 
   protected boolean alreadyComeHere = false;
 
@@ -84,8 +64,8 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
     region.put("Key", "Value1");
     ((LocalRegion)region).getDiskRegion().flushForTesting();
     try {
-      Assert.assertEquals("Value2", region.get("Key"));
-      Assert.assertEquals("Value2", getValueOnDisk(region));
+      assertEquals("Value2", region.get("Key"));
+      assertEquals("Value2", getValueOnDisk(region));
     }
     catch (EntryNotFoundException e) {
       logWriter.error("Exception occured", e);
@@ -118,7 +98,7 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
     region.put("Key", "Value1");
     ((LocalRegion)region).getDiskRegion().flushForTesting();
     try {
-      Assert.assertEquals("Value1", getValueOnDisk(region));
+      assertEquals("Value1", getValueOnDisk(region));
     }
     catch (EntryNotFoundException e) {
       logWriter.error("Exception occured", e);
@@ -320,7 +300,7 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
           long newOplogOffset = id.getOffsetInOplog();
           id.setOplogId(oldOplogId);
           id.setOffsetInOplog(oldOplogOffset);
-          Assert.assertEquals("Value1", ((LocalRegion)region).getDiskRegion()
+          assertEquals("Value1", ((LocalRegion)region).getDiskRegion()
               .getNoBuffer(id));
           id.setOplogId(newOplogId);
           id.setOffsetInOplog(newOplogOffset);
@@ -336,7 +316,7 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
     ((LocalRegion)region).getDiskRegion().flushForTesting();
     try {
 //       region.getCache().getLogger().info("getting value2");
-      Assert.assertEquals("Value2", region.get("Key"));
+      assertEquals("Value2", region.get("Key"));
     }
     catch (Exception e) {
       logWriter.error("Exception occured", e);
@@ -346,7 +326,7 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
     ((LocalRegion)region).getDiskRegion().flushForTesting();
 
     try {
-      Assert.assertEquals("Value2", getValueOnDisk(region));
+      assertEquals("Value2", getValueOnDisk(region));
     }
     catch (EntryNotFoundException e) {
       logWriter.error("Exception occured", e);
@@ -372,7 +352,7 @@ public class ConcurrentFlushingAndRegionOperationsJUnitTest extends
     region.put("Key", "Value1");
     ((LocalRegion)region).getDiskRegion().flushForTesting();
     try {
-      Assert.assertEquals("Value1", getValueOnDisk(region));
+      assertEquals("Value1", getValueOnDisk(region));
     }
     catch (EntryNotFoundException e) {
       logWriter.error("Exception occured", e);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
index 4fb244a..cbc6784 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
@@ -14,20 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/**
- * 
- */
 package com.gemstone.gemfire.internal.cache;
 
-import org.junit.Ignore;
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+import static com.gemstone.gemfire.test.dunit.Assert.*;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -37,7 +26,9 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import junit.framework.AssertionFailedError;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.Delta;
 import com.gemstone.gemfire.InvalidDeltaException;
@@ -54,29 +45,29 @@ import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 import com.gemstone.gemfire.cache.client.internal.DestroyOp;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
-import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
-import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * tests for the concurrentMapOperations. there are more tests in ClientServerMiscDUnitTest
- *
  */
 @Category(DistributedTest.class)
 public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
 
-  protected static final String REP_REG_NAME = "repRegion";
-  protected static final String PR_REG_NAME = "prRegion";
+  private static final String REP_REG_NAME = "repRegion";
+  private static final String PR_REG_NAME = "prRegion";
   private static final int MAX_ENTRIES = 113;
   
   enum OP {PUTIFABSENT, REPLACE, REMOVE}
@@ -101,11 +92,11 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
     });
   }
 
-  protected Region createReplicateRegion() {
+  private Region createReplicateRegion() {
     return getCache().createRegionFactory(RegionShortcut.REPLICATE).setConcurrencyChecksEnabled(true).create(REP_REG_NAME);
   }
 
-  protected Region createPartitionedRegion() {
+  private Region createPartitionedRegion() {
     return getCache().createRegionFactory(RegionShortcut.PARTITION).setConcurrencyChecksEnabled(true).create(PR_REG_NAME);
   }
 
@@ -173,8 +164,8 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
       }
     });
   }
-  
-   static abstract class AbstractConcMapOpsListener implements CacheListener<Integer, String> {
+
+  private static abstract class AbstractConcMapOpsListener implements CacheListener<Integer, String> {
     public void afterCreate(EntryEvent<Integer, String> event) {
       validate(event);
     }
@@ -201,15 +192,15 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
     }
     abstract void validate(EntryEvent event);
   }
-  
-  static class NotInvokedListener extends AbstractConcMapOpsListener {
+
+  private static class NotInvokedListener extends AbstractConcMapOpsListener {
     @Override
     void validate(EntryEvent event) {
       fail("should not be called.  Event="+event);
     }
   }
 
-  static class InitialCreatesListener extends AbstractConcMapOpsListener {
+  private static class InitialCreatesListener extends AbstractConcMapOpsListener {
     AtomicInteger numCreates = new AtomicInteger();
     @Override
     void validate(EntryEvent event) {
@@ -219,12 +210,6 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
       numCreates.incrementAndGet();
     }
   }
-  /**
-   * @param name
-   */
-  public ConcurrentMapOpsDUnitTest() {
-    super();
-  }
 
   // test for bug #42164
   @Test
@@ -397,7 +382,7 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
         final Region r = getCache().getRegion(REP_REG_NAME);
         final Region pr = getCache().getRegion(PR_REG_NAME);
         WaitCriterion wc = new WaitCriterion() {
-          AssertionFailedError e = null;
+          AssertionError e = null;
           public boolean done() {
             try {
               if (!emptyClient) {
@@ -414,7 +399,7 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
               assertNull(pr.putIfAbsent("keyForNull", null));
               assertNull(r.putIfAbsent("clientNullKey", null));
               assertNull(pr.putIfAbsent("clientNullKey", null));
-            } catch (AssertionFailedError ex) {
+            } catch (AssertionError ex) {
               r.getCache().getLoggerI18n().fine("SWAP:caught ", ex);
               e = ex;
               return false;
@@ -449,7 +434,7 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
         final Region r = getCache().getRegion(REP_REG_NAME);
         final Region pr = getCache().getRegion(PR_REG_NAME);
         WaitCriterion wc = new WaitCriterion() {
-          AssertionFailedError e = null;
+          AssertionError e = null;
           public boolean done() {
             try {
               assertEquals("value2", r.putIfAbsent("key0", null));
@@ -463,7 +448,7 @@ public class ConcurrentMapOpsDUnitTest extends JUnit4CacheTestCase {
               assertNull(r.replace("NoKeyOnServer", "value"));
               assertTrue(r.replace("clientNullKey", null, "newValue"));
               assertTrue(pr.replace("clientNullKey", null, "newValue"));
-            } catch (AssertionFailedError ex) {
+            } catch (AssertionError ex) {
               e = ex;
               return false;
             }

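The hunks above replace junit.framework.AssertionFailedError with java.lang.AssertionError inside the WaitCriterion callbacks, so that JUnit 4 assertion failures are caught and the check is simply retried until the wait times out; a catch of the narrower AssertionFailedError would miss failures thrown by org.junit.Assert. Below is a minimal sketch of that retry idiom, assuming the WaitCriterion/Wait API suggested by the imports (done(), description(), Wait.waitForCriterion(criterion, ms, interval, throwOnTimeout)); the region lookup mirrors the test above and the key is a placeholder:

    // Sketch only: retry the assertions until they pass or the wait gives up.
    final Region r = getCache().getRegion(REP_REG_NAME);
    WaitCriterion wc = new WaitCriterion() {
      private AssertionError lastFailure;            // most recent failed attempt

      public boolean done() {
        try {
          assertNull(r.putIfAbsent("clientNullKey", null));
          return true;                               // assertions passed, stop waiting
        } catch (AssertionError e) {
          lastFailure = e;                           // remember it for the description
          return false;                              // state not yet consistent, retry
        }
      }

      public String description() {
        return "waiting for region state to settle: " + lastFailure;
      }
    };
    Wait.waitForCriterion(wc, 30 * 1000, 1000, true); // rethrows on timeout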
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
index 754fb34..1e63ea1 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
@@ -108,9 +108,7 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
   private static int counter = 0;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     counter++;
     if (longTest) {
       TIME_TO_RUN = 10000;
@@ -122,12 +120,6 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
     }
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
-
   @Test
   public void testPersistSyncConcurrency()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
index 4a8210f..fa6c3f9 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
@@ -16,31 +16,18 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import junit.framework.Assert;
-
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.internal.cache.CacheObserver;
-import com.gemstone.gemfire.internal.cache.CacheObserverAdapter;
-import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
-import com.gemstone.gemfire.internal.cache.DiskEntry;
-import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
-import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
-import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
@@ -51,13 +38,9 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * A region operation is done on the same key that is about to be rolled or has
  * just been rolled and the region operation is verified to have been correctly
  * executed.
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class ConcurrentRollingAndRegionOperationsJUnitTest extends
-    DiskRegionTestingBase
-{
+public class ConcurrentRollingAndRegionOperationsJUnitTest extends DiskRegionTestingBase {
 
   protected volatile boolean hasBeenNotified;
 
@@ -65,12 +48,9 @@ public class ConcurrentRollingAndRegionOperationsJUnitTest extends
 
   protected boolean encounteredFailure = false;
 
-
-  @Before
-  public void setUp() throws Exception
-  {
+  @Override
+  protected final void preSetUp() throws Exception {
     this.hasBeenNotified = false;
-    super.setUp();
   }
 
   void putBeforeRoll(final Region region)

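Several hunks in this patch delete @Before setUp()/@After tearDown() overrides that only called super, and move the per-test initialization into preSetUp()/postSetUp() hooks instead. The sketch below illustrates the template-method arrangement this implies; the base-class body is an assumption (the real DiskRegionTestingBase/JUnit4CacheTestCase code is not part of this diff), only the hook names mirror the patch:

    import org.junit.Before;

    // Hypothetical base class: owns the single @Before method and calls the hooks.
    public abstract class HypotheticalTestBase {

      @Before
      public final void setUp() throws Exception {
        preSetUp();             // subclass hook before the shared fixture
        createSharedFixture();  // base-class fixture (cache, disk dirs, ...)
        postSetUp();            // subclass hook after the shared fixture
      }

      protected void preSetUp() throws Exception {
        // default: nothing
      }

      protected void postSetUp() throws Exception {
        // default: nothing
      }

      private void createSharedFixture() {
        // shared setup would live here
      }
    }

    // A subclass overrides only the hook it needs and never calls super.setUp().
    class ExampleDiskTest extends HypotheticalTestBase {
      @Override
      protected final void postSetUp() throws Exception {
        // e.g. diskProps.setDiskDirs(dirs);
      }
    }

One upside of this arrangement is that a subclass can no longer forget the super.setUp() call, which was the usual failure mode of the old overriding style.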
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConflationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConflationJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConflationJUnitTest.java
index d7493de..238d641 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConflationJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConflationJUnitTest.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.*;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- * 
 * This test checks that conflation in the buffer happens correctly
  * 
  * Conflation cases tested include:
@@ -47,198 +46,164 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * <li> create, invalidate, modify
  * </ul>
  * The test is done for persist only, overflow only and persist + overflow only (async modes).
- * 
- *
  */
 @Category(IntegrationTest.class)
-public class ConflationJUnitTest extends DiskRegionTestingBase
-{
+public class ConflationJUnitTest extends DiskRegionTestingBase {
+  
   private DiskRegionProperties diskProps = new DiskRegionProperties();
 
+  private long flushCount;
+
   @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setBytesThreshold(100000000);
     diskProps.setTimeInterval(100000000);
     diskProps.setSynchronous(false);
   }
 
-
-
-  protected void createOverflowOnly()
-  {
-    region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
-        diskProps);
-  }
-
-  protected void createPersistOnly()
-  {
+  private void createPersistOnly() {
     region = DiskRegionHelperFactory
         .getAsyncPersistOnlyRegion(cache, diskProps);
   }
 
-  protected void createOverflowAndPersist()
-  {
+  private void createOverflowAndPersist() {
     region = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
         diskProps);
   }
 
   /**
    * do a put followed by a put
-   *  
    */
-  void putAndPut()
-  {
+  private void putAndPut() {
     region.put(new Integer(1), new Integer(1));
     region.put(new Integer(1), new Integer(2));
   }
 
   /**
    * do a put followed by a destroy on the same entry
-   *  
    */
-  void putAndDestroy()
-  {
+  private void putAndDestroy() {
     region.put(new Integer(1), new Integer(1));
     try {
       region.destroy(new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
+      logWriter.error("Exception occurred",e);
       fail(" failed to destory Integer");
     }
   }
 
   /**
    * do a put destroy the same entry and put it again
-   *  
    */
-  void putDestroyPut()
-  {
+  private void putDestroyPut() {
     putAndDestroy();
     region.put(new Integer(1), new Integer(2));
   }
 
   /**
    * put a key and then invalidate it
-   *  
    */
-  void putAndInvalidate()
-  {
+  private void putAndInvalidate() {
     region.put(new Integer(1), new Integer(1));
     try {
       region.invalidate(new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed to invalidate Integer");
+      logWriter.error("Exception occurred",e);
+      throw new AssertionError(" failed to invalidate Integer", e);
     }
   }
 
   /**
    * put a key, invalidate it and then perform a put on it
-   *  
    */
-  void putInvalidatePut()
-  {
+  private void putInvalidatePut() {
     putAndInvalidate();
     region.put(new Integer(1), new Integer(2));
   }
 
   /**
    * do a create and then a put on the same key
-   *  
    */
-  void createAndPut()
-  {
+  private void createAndPut() {
     try {
       region.create(new Integer(1), new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-       fail(" failed in trying to create");
+      logWriter.error("Exception occurred",e);
+      throw new AssertionError(" failed in trying to create", e);
     }
     region.put(new Integer(1), new Integer(2));
   }
 
   /**
    * do a create and then a destroy
-   *  
    */
-  void createAndDestroy()
-  {
+  private void createAndDestroy() {
     try {
       region.create(new Integer(1), new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed in trying to create");
+      logWriter.error("Exception occurred", e);
+      throw new AssertionError("failed in trying to create", e);
     }
     try {
       region.destroy(new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed to destory Integer");
+      logWriter.error("Exception occurred", e);
+      throw new AssertionError("failed to destroy Integer", e);
     }
   }
 
   /**
    * do a create then destroy the entry and create it again
-   *  
    */
-  void createDestroyCreate()
-  {
+  private void createDestroyCreate() {
     createAndDestroy();
     try {
       region.create(new Integer(1), new Integer(2));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed in trying to create");
+      logWriter.error("Exception occurred", e);
+      throw new AssertionError("failed in trying to create", e);
     }
   }
 
   /**
    * create an entry and then invalidate it
-   *  
    */
-  void createAndInvalidate()
-  {
+  private void createAndInvalidate() {
     try {
       region.create(new Integer(1), new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed in trying to create");
+      logWriter.error("Exception occurred", e);
+      throw new AssertionError("failed in trying to create", e);
     }
     try {
       region.invalidate(new Integer(1));
     }
     catch (Exception e) {
-      logWriter.error("Exception occured",e);
-      fail(" failed to invalidate Integer");
+      logWriter.error("Exception occurred", e);
+      throw new AssertionError("failed to invalidate Integer", e);
     }
   }
 
   /**
    * create an entry, invalidate it and then perform a put on the same key
-   *  
    */
-  void createInvalidatePut()
-  {
+  private void createInvalidatePut() {
     createAndInvalidate();
     region.put(new Integer(1), new Integer(2));
   }
 
   /**
    * validate whether a modification of an entry was correctly done
-   *  
    */
-  void validateModification()
-  {
+  private void validateModification() {
     Collection entries = ((LocalRegion)region).entries.regionEntries();
     if (entries.size() != 1) {
       fail("expected size to be 1 but is not so");
@@ -247,16 +212,14 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
     DiskId id = ((DiskEntry)entry).getDiskId();
     Object obj = ((LocalRegion)region).getDiskRegion().get(id);
     if (!(obj.equals(new Integer(2)))) {
-      fail(" incorrect modification");
+      fail("incorrect modification");
     }
   }
 
   /**
    * validate whether nothing was written
-   */  
- 
-  void validateNothingWritten()
-  {
+   */
+  private void validateNothingWritten() {
     Collection entries = ((LocalRegion)region).entries.regionEntries();
     //We actually will have a tombstone in the region, hence
     //the 1 entry
@@ -264,19 +227,12 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
       fail("expected size to be 1 but is " + entries.size());
     }
     assertEquals(this.flushCount, getCurrentFlushCount());
-//     Oplog oplog = ((LocalRegion)region).getDiskRegion().getChild();
-//     if (oplog.getOplogSize() != 0) {
-//       fail(" expected zero bytes to have been written but is "
-//           + oplog.getOplogSize());
-//     }
   }
   
   /**
    * validate whether the destroy left a tombstone
-   *  
    */
-  void validateTombstone()
-  {
+  private void validateTombstone() {
     Collection entries = ((LocalRegion)region).entries.regionEntries();
     if (entries.size() != 1) {
       fail("expected size to be 1 but is " + entries.size());
@@ -289,10 +245,8 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
 
   /**
    * validate whether invalidate was done
-   *  
    */
-  void validateInvalidate()
-  {
+  private void validateInvalidate() {
     Collection entries = ((LocalRegion)region).entries.regionEntries();
     if (entries.size() != 1) {
       fail("expected size to be 1 but is " + entries.size());
@@ -305,31 +259,26 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
     }
   }
 
-  private long flushCount;
-
   private long getCurrentFlushCount() {
     return ((LocalRegion)region).getDiskStore().getStats().getFlushes();
   }
-  void pauseFlush() {
+
+  private void pauseFlush() {
     ((LocalRegion)region).getDiskRegion().pauseFlusherForTesting();
     this.flushCount = getCurrentFlushCount();
   }
   
   /**
    * force a flush on the region
-   *  
    */
-  void forceFlush()
-  {
+  private void forceFlush() {
     ((LocalRegion)region).getDiskRegion().flushForTesting();
   }
 
   /**
    * all the operations done here
-   *  
    */
-  void allTest()
-  {
+  private void allTest() {
     pauseFlush();
     createAndPut();
     forceFlush();
@@ -370,16 +319,13 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
     forceFlush();
     validateInvalidate();
     region.clear();
-
   }
 
   /**
    * test conflation for persist only
-   *  
    */
   @Test
-  public void testPersistOnlyConflation()
-  {
+  public void testPersistOnlyConflation() throws Exception {
     createPersistOnly();
     allTest();
     closeDown();
@@ -387,22 +333,11 @@ public class ConflationJUnitTest extends DiskRegionTestingBase
 
   /**
    * test conflation for overflow and persist
-   *  
    */
   @Test
-  public void testOverFlowAndPersistOnlyConflation()
-  {
-    try {
-      createOverflowAndPersist();
-      allTest();
-      closeDown();
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail(e.toString());
-    }
+  public void testOverFlowAndPersistOnlyConflation() throws Exception {
+    createOverflowAndPersist();
+    allTest();
+    closeDown();
   }
-
- 
-
 }

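A second pattern in the ConflationJUnitTest hunks: catch blocks that used to log the exception and call fail(message) now throw new AssertionError(message, e). Both fail the test, but the two-argument AssertionError constructor (available since Java 7) carries the original exception as the cause, so the failure report shows the real stack trace instead of only the message. A before/after sketch, with region, key and logWriter standing in for the test's fields:

    // Before: the root cause survives only as a log line.
    try {
      region.invalidate(key);
    } catch (Exception e) {
      logWriter.error("Exception occurred", e);
      fail("failed to invalidate " + key);          // message only, cause is lost
    }

    // After: the cause rides along with the failure.
    try {
      region.invalidate(key);
    } catch (Exception e) {
      logWriter.error("Exception occurred", e);
      throw new AssertionError("failed to invalidate " + key, e);
    }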
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
index 93e8731..43fe7b0 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
@@ -38,39 +38,29 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-/**
- * 
- *
- */
-@Category(IntegrationTest.class)
-public class DiskRandomOperationsAndRecoveryJUnitTest extends DiskRegionTestingBase
-{
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-  private static int ENTRY_SIZE = 1024;
-  final static byte[] valueBytes = new byte[ENTRY_SIZE];
+@Category(IntegrationTest.class)
+public class DiskRandomOperationsAndRecoveryJUnitTest extends DiskRegionTestingBase {
+
+  private static final int ENTRY_SIZE = 1024;
+
+  private static final byte[] valueBytes = new byte[ENTRY_SIZE];
   static {
     Arrays.fill(valueBytes, (byte)32);
   }
-  final static private Object value = new String(valueBytes);
 
-  // private static final boolean debug = false;
+  private static final Object value = new String(valueBytes);
 
-  private static int testId=0;
-  
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  private static int testId = 0;
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
+
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     testId++;
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
-
   private final static int ITERATIONS = 4;
   private final static long MAX_OPLOG_SIZE_IN_BYTES = 1024*16;
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
index c0f269f..47004a6 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
@@ -18,6 +18,7 @@ package com.gemstone.gemfire.internal.cache;
 
 import java.io.File;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Properties;
 
@@ -39,67 +40,41 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * This test is for testing Disk attributes set programmatically
  * The generated cacheXml is used to create a cache and the region
  * properties retested.
- * 
  */
 @Category(IntegrationTest.class)
-public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
-{
-  PrintWriter pw;
-
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps1 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps2 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps3 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps4 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps5 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps6 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps7 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps8 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps9 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps10 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps11 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps12 = new DiskRegionProperties();
-
-  Region region1;
-
-  Region region2;
-
-  Region region3;
-
-  Region region4;
-
-  Region region5;
-
-  Region region6;
-
-  Region region7;
-
-  Region region8;
-
-  Region region9;
-
-  Region region10;
-
-  Region region11;
-
-  Region region12;
-
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase {
+
+  private PrintWriter pw;
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps1 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps2 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps3 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps4 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps5 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps6 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps7 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps8 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps9 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps10 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps11 = new DiskRegionProperties();
+  private DiskRegionProperties diskProps12 = new DiskRegionProperties();
+
+  private Region region1;
+  private Region region2;
+  private Region region3;
+  private Region region4;
+  private Region region5;
+  private Region region6;
+  private Region region7;
+  private Region region8;
+  private Region region9;
+  private Region region10;
+  private Region region11;
+  private Region region12;
+
+  @Override
+  protected final void postSetUp() throws Exception {
     diskDirSize = new int[4];
     diskDirSize[0] = Integer.MAX_VALUE;
     diskDirSize[1] = Integer.MAX_VALUE;
@@ -119,15 +94,7 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskProps12.setDiskDirs(dirs);
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  public void createCacheXML()
-  {
+  private void createCacheXML() throws IOException {
     // create the region1 which is SyncPersistOnly and set DiskWriteAttributes
     diskProps1.setRolling(true);
     diskProps1.setMaxOplogSize(1073741824L);
@@ -220,23 +187,14 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     region12 = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
         diskProps12);
 
-
     //cacheXmlGenerator: generates cacheXml file
-    try {
-      FileWriter fw = new FileWriter(new File("DiskRegCachexmlGeneratorJUnitTest.xml"));
-      PrintWriter pw = new PrintWriter(fw);
-      CacheXmlGenerator.generate(cache, pw);
-    }
-    catch (Exception ex) {
-      logWriter.error("Exception occured",ex);
-      fail("FAILED While cache xml generation");
-    }
-
+    FileWriter fw = new FileWriter(new File("DiskRegCachexmlGeneratorJUnitTest.xml"));
+    PrintWriter pw = new PrintWriter(fw);
+    CacheXmlGenerator.generate(cache, pw);
   }
   
   @Test
-  public void testVerifyCacheXml() throws Exception
-  {
+  public void testVerifyCacheXml() throws Exception {
     createCacheXML();
     ds.disconnect();
     // Connect to the GemFire distributed system
@@ -299,4 +257,3 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
   }
 
 }// end of DiskRegCachexmlGeneratorJUnitTest
-

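The createCacheXML() change above shows a related cleanup: instead of wrapping the body in try/catch and calling fail(), the helper now declares throws IOException and the test declares throws Exception, letting JUnit report any failure with its full stack trace. A short before/after sketch; writeCacheXml() is a hypothetical helper standing in for the generator call:

    // Before: the exception is flattened into a string and the trace is lost.
    @Test
    public void testVerifyCacheXmlOldStyle() {
      try {
        writeCacheXml();
      } catch (Exception e) {
        e.printStackTrace();
        fail("FAILED while generating cache xml: " + e);
      }
    }

    // After: any exception fails the test and keeps its stack trace.
    @Test
    public void testVerifyCacheXml() throws Exception {
      writeCacheXml();
    }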
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
index 34d540d..75701e1 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
@@ -16,10 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -30,30 +28,16 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- *  * Tests if callbacks are getting invoked correctly 
- *   * for 'create', 'update' and 'destroy' of disk region entries
- *    * with concurrent 'clear' 
- *      *
+ * Tests if callbacks are getting invoked correctly
+ * for 'create', 'update' and 'destroy' of disk region entries
+ * with concurrent 'clear'
  */
 @Category(IntegrationTest.class)
-public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase 
-{
+public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase {
 
-  volatile static boolean intoCreateAfterCbk = false;
-  volatile static boolean intoUpdateAfterCbk = false;
-  volatile static boolean intoDestroyAfterCbk = false;
-  
-  @Before
-  public void setUp() throws Exception
-  {  
-    super.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+  private static volatile boolean intoCreateAfterCbk = false;
+  private static volatile boolean intoUpdateAfterCbk = false;
+  private static volatile boolean intoDestroyAfterCbk = false;
   
   private DiskRegionProperties getDiskRegionProperties(){
     DiskRegionProperties diskProperties = new DiskRegionProperties();
@@ -64,8 +48,7 @@ public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase
   }
     
   @Test
-  public void testAfterCallbacks()
-  {
+  public void testAfterCallbacks() {
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
       getDiskRegionProperties(), Scope.LOCAL);
 
@@ -112,6 +95,5 @@ public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase
     region.create("key3", "createValue");
     region.destroy("key3");
     assertTrue("Destroy callback not called", intoDestroyAfterCbk);
-	
-  }  
+  }
 }

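DiskRegCbkChkJUnitTest verifies callbacks by having them flip static volatile booleans that the test asserts on afterwards; the hunk above only narrows them to private static volatile. The sketch below shows why volatile matters in that pattern: the callback may run on another thread (an event-dispatch or flusher thread, for example), and volatile keeps the flag visible to the test thread even when, as in the real test, there is no explicit join. The Runnable is a stand-in for the real CacheWriterAdapter callback, which this hunk does not show:

    import static org.junit.Assert.assertTrue;
    import org.junit.Test;

    public class CallbackFlagSketchTest {

      private static volatile boolean intoCreateAfterCbk = false;

      @Test
      public void callbackSetsFlagVisibleToTestThread() throws Exception {
        Runnable callback = new Runnable() {       // stands in for the cache callback
          @Override
          public void run() {
            intoCreateAfterCbk = true;             // record that the callback ran
          }
        };

        Thread dispatcher = new Thread(callback);  // callbacks may run off-thread
        dispatcher.start();
        dispatcher.join();

        assertTrue("Create callback not called", intoCreateAfterCbk);
      }
    }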
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
index ba3af6a..99ab7e6 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
@@ -16,16 +16,11 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -39,44 +34,31 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Miscellaneous disk tests
- * 
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOplogSwtchingAndRollerJUnitTest extends
-    DiskRegionTestingBase
-{
+public class DiskRegOplogSwtchingAndRollerJUnitTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private static File[] dirs1 = null;
 
-  protected boolean encounteredException = false;
+  private static int[] diskDirSize1 = null;
 
-  protected volatile boolean hasBeenNotified = false;
+  private volatile boolean hasBeenNotified = false;
 
-  protected static File[] dirs1 = null;
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  protected static int[] diskDirSize1 = null;
+  private boolean encounteredException = false;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
-  }
+  private Object forWaitNotify = new Object();
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+  private boolean gotNotification = false;
+
+  private Object getValOnDsk = null;
 
   /**
-   * tests non occurence of DiskAccessException
-   *  
+   * tests non occurrence of DiskAccessException
    */
   @Test
-  public void testSyncPersistRegionDAExp()
-  {
+  public void testSyncPersistRegionDAExp() {
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
     testingDirectory1.deleteOnExit();
@@ -114,8 +96,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }// end of testSyncPersistRegionDAExp
 
   @Test
-  public void testAsyncPersistRegionDAExp()
-  {
+  public void testAsyncPersistRegionDAExp() {
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
     testingDirectory1.deleteOnExit();
@@ -151,8 +132,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
   }// end of testAsyncPersistRegionDAExp
 
-  private void diskAccessExpHelpermethod(final Region region)
-  {
+  private void diskAccessExpHelpermethod(final Region region) {
     final byte[] value = new byte[990];
     Arrays.fill(value, (byte)77);
     try {
@@ -172,18 +152,11 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     }
   }
 
-  protected Object forWaitNotify = new Object();
-
-  protected boolean gotNotification = false;
-
   /**
    * DiskRegionRollingJUnitTest :
-   * 
-   *  
    */
   @Test
-  public void testSyncRollingHappening()
-  {
+  public void testSyncRollingHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -235,8 +208,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testSyncRollingNotHappening()
-  {
+  public void testSyncRollingNotHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -277,8 +249,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testAsyncRollingHappening()
-  {
+  public void testAsyncRollingHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -334,8 +305,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testAsyncRollingNotHappening()
-  {
+  public void testAsyncRollingNotHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -374,8 +344,6 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
   }
 
-  protected Object getValOnDsk = null;
-
   /**
    * DiskRegOplog1OverridingOplog2JUnitTest: Disk Region test : oplog1 flush
    * overriding oplog2 flush
@@ -383,11 +351,9 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * This test will hold the flush of oplog1 and flush oplog2 before it. After
    * that oplog1 is allowed to flush. A get of an entry which was first put in
    * oplog1 and then in oplog2 should result in the get being done from oplog2.
-   *  
    */
   @Test
-  public void testOplog1FlushOverridingOplog2Flush()
-  {
+  public void testOplog1FlushOverridingOplog2Flush() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -468,8 +434,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
 
   }// end of testOplog1FlushOverridingOplog2Flush
 
-  class DoesFlush implements Runnable
-  {
+  private class DoesFlush implements Runnable {
 
     private Region region;
 
@@ -477,15 +442,14 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       ((LocalRegion)region).getDiskRegion().flushForTesting();
       synchronized (region) {
         region.notify();
         hasBeenNotified = true;
       }
     }
-
   }
 
   /**
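The DoesFlush run() method in the hunk above hands control back to the test through a synchronized/notify handshake on the region plus a hasBeenNotified flag. A self-contained sketch of that handshake, with doBackgroundFlush() standing in for the real flushForTesting() call; the while loop is what the flag enables, guarding against spurious wake-ups and a notify that arrives before the wait:

    private final Object lock = new Object();
    private volatile boolean hasBeenNotified = false;

    private void flushInBackgroundAndWait() throws InterruptedException {
      Thread flusher = new Thread(new Runnable() {
        @Override
        public void run() {
          doBackgroundFlush();             // stands in for flushForTesting()
          synchronized (lock) {
            hasBeenNotified = true;        // publish completion under the lock
            lock.notify();                 // wake the waiting test thread
          }
        }
      });
      flusher.start();

      synchronized (lock) {
        while (!hasBeenNotified) {         // flag handles spurious wake-ups
          lock.wait(10 * 1000);            //   and an early notify
        }
      }
    }

    private void doBackgroundFlush() {
      // hypothetical placeholder for the disk-region flush
    }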
@@ -493,8 +457,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * the time rolling has started , the entry exists in the current oplog
    */
   @Test
-  public void testEntryExistsinCurrentOplog()
-  {
+  public void testEntryExistsinCurrentOplog() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -585,11 +548,9 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   /**
    * Entries deleted in current Oplog are recorded correctly during the rolling
    * of that oplog
-   *  
    */
   @Test
-  public void testEntryDeletedinCurrentOplog()
-  {
+  public void testEntryDeletedinCurrentOplog() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -690,28 +651,15 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    // region.close();
   }// end of testEntryDeletedinCurrentOplog
 
-
-  /**
-   * 
-   * @param region
-   *          get LRU statistics
-   */
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
   }
 
   /**
    * to validate the get operation performed on a byte array.
-   * 
-   * @param key
-   * @param region
-   * @return
    */
-
-  private boolean getByteArrVal(Long key, Region region)
-  {
+  private boolean getByteArrVal(Long key, Region region) {
     Object val = null;
     byte[] val2 = new byte[1024];
     Arrays.fill(val2, (byte)77);
@@ -742,12 +690,10 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * Roller should wait for asynch writer to terminate if asynch flush is going
    * on , before deleting the oplog
    */
-  protected boolean afterWritingBytes = false;
+  private boolean afterWritingBytes = false;
 
   @Test
-  public void testOplogRollerWaitingForAsyncWriter()
-  {
-
+  public void testOplogRollerWaitingForAsyncWriter() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -838,8 +784,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
 
   }// end of testOplogRollerWaitingForAsyncWriter
 
-  class DoesFlush1 implements Runnable
-  {
+  private class DoesFlush1 implements Runnable {
 
     private Region region;
 
@@ -847,27 +792,22 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       ((LocalRegion)region).getDiskRegion().flushForTesting();
       synchronized (region) {
         region.notify();
         hasBeenNotified = true;
       }
     }
-
   }
 
   /**
    * Task 125: Ensuring that retrieval of evicted entry data for rolling
    * purposes is correct and does not itself trigger eviction
-   * 
-   * @throws EntryNotFoundException
-   *  
    */
   @Test
-  public void testGetEvictedEntry() throws EntryNotFoundException
-  {
+  public void testGetEvictedEntry() throws EntryNotFoundException {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(false);
@@ -969,10 +909,8 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * DiskAccessException doesn't occur even when amount of put data exceeds the
    * max dir sizes.
    */
-
   @Test
-  public void testDiskFullExcep()
-  {
+  public void testDiskFullExcep() {
     boolean exceptionOccured = false;
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = 1048576;