Posted to commits@geode.apache.org by kl...@apache.org on 2016/06/09 18:50:00 UTC

[32/54] [abbrv] [partial] incubator-geode git commit: GEODE-837: update tests from JUnit3 to JUnit4
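
The diff below applies the same mechanical conversion to each test class: add @Category(DistributedTest.class) (or IntegrationTest for the single-VM tests), extend JUnit4DistributedTestCase or JUnit4CacheTestCase instead of the JUnit 3 base class, drop the String-name constructor, and mark every test method with @Test. A minimal before/after sketch of that shape, using an illustrative class name (ExampleDUnitTest is not part of this commit):

    // JUnit 3 style (before)
    public class ExampleDUnitTest extends DistributedTestCase {
      public ExampleDUnitTest(String name) {
        super(name);              // JUnit 3 base class requires the test name
      }
      public void testSomething() throws Exception {
        // discovered by the "test" name prefix
      }
    }

    // JUnit 4 style (after)
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
    import com.gemstone.gemfire.test.junit.categories.DistributedTest;

    @Category(DistributedTest.class)
    public class ExampleDUnitTest extends JUnit4DistributedTestCase {
      @Test                       // discovered by annotation, so the name prefix no longer matters
      public void testSomething() throws Exception {
      }
    }

The JUnit 3 runner located tests through the name prefix and the required String constructor; with JUnit 4 the category and @Test annotations carry that information, which is why each hunk below deletes the constructor and adds an annotation per test method.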

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
index cf42b9b..8815318 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
@@ -19,10 +19,33 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.util.Properties;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import com.gemstone.gemfire.DeltaTestImpl;
 import com.gemstone.gemfire.InvalidDeltaException;
 import com.gemstone.gemfire.LogWriter;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.CacheListener;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.ExpirationAttributes;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.RegionEvent;
+import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.client.Pool;
 import com.gemstone.gemfire.cache.client.PoolFactory;
 import com.gemstone.gemfire.cache.client.PoolManager;
@@ -42,18 +65,21 @@ import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.internal.tcp.ConnectionTable;
-import com.gemstone.gemfire.test.dunit.*;
-
-import java.io.File;
-import java.util.Properties;
-
-import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * @since GemFire 6.1
  */
-public class DeltaPropagationDUnitTest extends DistributedTestCase {
-  private final static Compressor compressor = SnappyCompressor.getDefaultInstance();
+@Category(DistributedTest.class)
+public class DeltaPropagationDUnitTest extends JUnit4DistributedTestCase {
+
+  private static final Compressor compressor = SnappyCompressor.getDefaultInstance();
   
   protected static Cache cache = null;
 
@@ -71,7 +97,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
 
   private int PORT2;
 
-  private static final String regionName = "DeltaPropagationDUnitTest";
+  private static final String regionName = DeltaPropagationDUnitTest.class.getSimpleName();
 
   private static LogWriter logger = null;
 
@@ -125,13 +151,6 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
 
   public static final String DESTROY = "DESTROY";
 
-  /**
-   * @param name
-   */
-  public DeltaPropagationDUnitTest(String name) {
-    super(name);
-  }
-
   @Override
   public final void postSetUp() throws Exception {
     final Host host = Host.getHost(0);
@@ -162,6 +181,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     disconnectAllFromDS();
   }
 
+  @Test
   public void testS2CSuccessfulDeltaPropagationWithCompression() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache(
             HARegionQueue.HA_EVICTION_POLICY_NONE, new Integer(1),
@@ -194,6 +214,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
   
+  @Test
   public void testS2CSuccessfulDeltaPropagation() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -219,6 +240,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
 
+  @Test
   public void testS2CFailureInToDeltaMethod() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -253,6 +275,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
 
+  @Test
   public void testS2CFailureInFromDeltaMethod() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -281,6 +304,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
 
+  @Test
   public void testS2CWithOldValueAtClientOverflownToDisk() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -315,6 +339,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
 
+  @Test
   public void testS2CWithLocallyDestroyedOldValueAtClient() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -349,6 +374,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     verifyData(4, EVENTS_SIZE - 2);
   }
 
+  @Test
   public void testS2CWithInvalidatedOldValueAtClient() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -378,6 +404,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     assertTrue(listenerError.toString(), areListenerResultsValid);
   }
 
+  @Test
   public void testS2CDeltaPropagationWithClientConflationON() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -397,6 +424,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         .getFromDeltaInvokations().longValue() == 0);
   }
 
+  @Test
   public void testS2CDeltaPropagationWithServerConflationON() throws Exception {
     VM0.invoke(() -> DeltaPropagationDUnitTest.closeCache());
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache(
@@ -429,6 +457,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         (fromDeltaInvocations == (EVENTS_SIZE - 1)));
   }
 
+  @Test
   public void testS2CDeltaPropagationWithOnlyCreateEvents() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -451,6 +480,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testC2S2SDeltaPropagation() throws Exception {
     prepareDeltas();
     VM0.invoke(() -> DeltaPropagationDUnitTest.prepareDeltas());
@@ -498,6 +528,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         .deltaFeatureUsed());
   }
 
+  @Test
   public void testS2S2CDeltaPropagationWithHAOverflow() throws Exception {
     prepareDeltas();
     VM0.invoke(() -> DeltaPropagationDUnitTest.prepareDeltas());
@@ -545,6 +576,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         + fromDeltasOnClient, fromDeltasOnClient == (EVENTS_SIZE - 1));
   }
 
+  @Test
   public void testS2CDeltaPropagationWithGIIAndFailover() throws Exception {
     prepareDeltas();
     VM0.invoke(() -> DeltaPropagationDUnitTest.prepareDeltas());
@@ -613,6 +645,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     }
   }
 
+  @Test
   public void testBug40165ClientReconnects() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -683,6 +716,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
 
   }
 
+  @Test
   public void testBug40165ClientFailsOver() throws Exception {
     PORT1 = ((Integer)VM0.invoke(() -> DeltaPropagationDUnitTest.createServerCache( HARegionQueue.HA_EVICTION_POLICY_MEMORY ))).intValue();
     
@@ -1063,7 +1097,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
   public static Integer createServerCache(String ePolicy, Integer cap,
       Integer listenerCode, Boolean conflate, Compressor compressor) throws Exception {
     ConnectionTable.threadWantsSharedResources();
-    new DeltaPropagationDUnitTest("temp").createCache(new Properties());
+    new DeltaPropagationDUnitTest().createCache(new Properties());
     AttributesFactory factory = new AttributesFactory();
     factory.setEnableSubscriptionConflation(conflate);
     if (listenerCode.intValue() != 0) {
@@ -1314,7 +1348,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
     props.setProperty(CONFLATE_EVENTS, conflate);
-    new DeltaPropagationDUnitTest("temp").createCache(props);
+    new DeltaPropagationDUnitTest().createCache(props);
     AttributesFactory factory = new AttributesFactory();
     pool = ClientServerTestCase.configureConnectionPool(factory, "localhost", ports,
         true, Integer.parseInt(rLevel), 2, null, 1000, 250, false, -2);
@@ -1377,7 +1411,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
 
   public static void createDurableCacheClient(Pool poolAttr, String regionName,
       Properties dsProperties, Integer listenerCode, Boolean close) throws Exception {
-    new DeltaPropagationDUnitTest("temp").createCache(dsProperties);
+    new DeltaPropagationDUnitTest().createCache(dsProperties);
     PoolFactoryImpl pf = (PoolFactoryImpl)PoolManager.createFactory();
     pf.init(poolAttr);
     PoolImpl p = (PoolImpl)pf.create("DeltaPropagationDUnitTest");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
index 390d1c2..20deab0 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
@@ -19,9 +19,25 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import com.gemstone.gemfire.DeltaTestImpl;
 import com.gemstone.gemfire.InvalidDeltaException;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.client.Pool;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.server.CacheServer;
@@ -32,16 +48,16 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.tcp.ConnectionTable;
-import com.gemstone.gemfire.test.dunit.*;
-
-import java.util.Properties;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
-import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
-
-/**
- * 
- */
-public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
+@Category(DistributedTest.class)
+public class DeltaPropagationStatsDUnitTest extends JUnit4DistributedTestCase {
 
   protected static VM vm0 = null;
 
@@ -69,10 +85,6 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
 
   private static final int CLIENT_TO_SERVER = 3;
 
-  public DeltaPropagationStatsDUnitTest(String name) {
-    super(name);
-  }
-
   @Override
   public final void postSetUp() throws Exception {
     final Host host = Host.getHost(0);
@@ -112,6 +124,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testS2CDeltaPropagationCleanStats() throws Exception {
     int numOfKeys = 50;
     long updates = 50;
@@ -137,6 +150,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testS2CDeltaPropagationFailedStats1() throws Exception {
     int numOfKeys = 25;
     long updates = 50;
@@ -167,7 +181,9 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
-  public void _testS2CDeltaPropagationFailedStats2() throws Exception {
+  @Ignore("TODO")
+  @Test
+  public void testS2CDeltaPropagationFailedStats2() throws Exception {
   }
 
   /**
@@ -175,6 +191,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testP2PDeltaPropagationCleanStats() throws Exception {
     int numOfKeys = 50;
     long updates = 50;
@@ -203,6 +220,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testP2PDeltaPropagationFailedStats1() throws Exception {
     int numOfKeys = 50, numOfkeys2 = 10;
     long updates = 50, updates2 = 50;
@@ -239,7 +257,9 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
-  public void _testP2PDeltaPropagationFailedStats2() throws Exception {
+  @Ignore("TODO")
+  @Test
+  public void testP2PDeltaPropagationFailedStats2() throws Exception {
   }
 
   /**
@@ -247,6 +267,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testC2SDeltaPropagationCleanStats() throws Exception {
     int numOfKeys = 50;
     long updates = 50;
@@ -279,6 +300,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testC2SDeltaPropagationFailedStats1() throws Exception {
     int numOfKeys = 50;
     long updates = 50;
@@ -310,7 +332,9 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
    * 
    * @throws Exception
    */
-  public void _testC2SDeltaPropagationFailedStats2() throws Exception {
+  @Ignore("TODO")
+  @Test
+  public void testC2SDeltaPropagationFailedStats2() throws Exception {
   }
 
   public static void waitForLastKey() {
@@ -513,7 +537,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    cache = new DeltaPropagationStatsDUnitTest("temp").createCache(props);
+    cache = new DeltaPropagationStatsDUnitTest().createCache(props);
     pool = PoolManager.createFactory().addServer(host, port)
         .setThreadLocalConnections(true).setMinConnections(1)
         .setSubscriptionEnabled(true).setSubscriptionRedundancy(0)
@@ -546,8 +570,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
   public static Integer createServerCache(Boolean flag, DataPolicy policy,
       Scope scope, Boolean listener) throws Exception {
     ConnectionTable.threadWantsSharedResources();
-    DeltaPropagationStatsDUnitTest test = new DeltaPropagationStatsDUnitTest(
-        "temp");
+    DeltaPropagationStatsDUnitTest test = new DeltaPropagationStatsDUnitTest();
     Properties props = new Properties();
     if (!flag) {
       props

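In DeltaPropagationStatsDUnitTest above, the three tests that JUnit 3 had disabled by renaming them with a leading underscore (for example _testS2CDeltaPropagationFailedStats2) get their real names back and are marked @Ignore("TODO"); JUnit 4 then reports them as skipped instead of silently never running them. A short sketch of that idiom, with an illustrative method name:

    import org.junit.Ignore;
    import org.junit.Test;

    public class ExampleStatsDUnitTest {

      // JUnit 3: a leading underscore hid the test with no trace in the results.
      // JUnit 4: keep the test name, skip it explicitly so it shows up as ignored.
      @Ignore("TODO")
      @Test
      public void testSomethingStillUnimplemented() throws Exception {
      }
    }
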
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
index 7d53098..664c3b2 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
@@ -16,6 +16,15 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import org.junit.experimental.categories.Category;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
 import java.io.Serializable;
 
 import com.gemstone.gemfire.cache.AttributesFactory;
@@ -39,43 +48,52 @@ import com.gemstone.gemfire.test.dunit.VM;
 /**
  *
  */
-public class DeltaSizingDUnitTest extends CacheTestCase {
+@Category(DistributedTest.class)
+public class DeltaSizingDUnitTest extends JUnit4CacheTestCase {
   
   /**
    * @param name
    */
-  public DeltaSizingDUnitTest(String name) {
-    super(name);
+  public DeltaSizingDUnitTest() {
+    super();
   }
   
+  @Test
   public void testPeerWithoutCloning() throws Exception {
     doPeerTest(false, false);
   }
   
+  @Test
   public void testPeerWithCloning() throws Exception {
     doPeerTest(true, false);
   }
   
+  @Test
   public void testPeerWithCopyOnRead() throws Exception {
     doPeerTest(false, true);
   }
   
+  @Test
   public void testPeerWithCopyOnAndClone() throws Exception {
     doPeerTest(true, true);
   }
   
+  @Test
   public void testClientWithoutCloning() throws Exception {
     doClientTest(false, false);
   }
   
+  @Test
   public void testClientWithCloning() throws Exception {
     doClientTest(true, false);
   }
   
+  @Test
   public void testClientWithCopyOnRead() throws Exception {
     doClientTest(false, true);
   }
   
+  @Test
   public void testClientWithCopyOnAndClone() throws Exception {
     doClientTest(true, true);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
index 5819060..56f533a 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
@@ -16,43 +16,44 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import com.gemstone.gemfire.cache.*;
+import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.HeapDataOutputStream;
 import com.gemstone.gemfire.internal.InternalDataSerializer;
 import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionHolder;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-
-import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
-import static org.junit.Assert.assertEquals;
 
 /**
  * Tests offline compaction
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskOfflineCompactionJUnitTest
-{
-
-  protected static Cache cache = null;
-
-  protected static DistributedSystem ds = null;
+public class DiskOfflineCompactionJUnitTest {
 
   // In this test, entry version, region version, member id, each will be 1 byte
-  final static int versionsize = 3;
+  private static final int versionsize = 3;
 
-  static {
-  }
+  private static Cache cache = null;
+
+  private static DistributedSystem ds = null;
 
   private int getDSID(LocalRegion lr) {
     return lr.getDistributionManager().getDistributedSystemId();    
@@ -217,8 +218,7 @@ public class DiskOfflineCompactionJUnitTest
   }
 
   @Test
-  public void testTwoEntriesWithUpdateAndDestroy()
-      throws Exception {
+  public void testTwoEntriesWithUpdateAndDestroy() throws Exception {
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     dsf.setAutoCompact(false);
     String name = "testTwoEntriesWithUpdateAndDestroy";
@@ -553,8 +553,7 @@ public class DiskOfflineCompactionJUnitTest
   }
 
   @Test
-  public void testForceRollTwoEntriesWithUpdates()
-      throws Exception {
+  public void testForceRollTwoEntriesWithUpdates() throws Exception {
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     dsf.setAutoCompact(false);
     String name = "testForceRollTwoEntriesWithUpdates";
@@ -639,8 +638,7 @@ public class DiskOfflineCompactionJUnitTest
   }
 
   @Test
-  public void testForceRollTwoEntriesWithUpdateAndDestroy()
-      throws Exception {
+  public void testForceRollTwoEntriesWithUpdateAndDestroy() throws Exception {
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     dsf.setAutoCompact(false);
     String name = "testForceRollTwoEntriesWithUpdateAndDestroy";
@@ -733,7 +731,7 @@ public class DiskOfflineCompactionJUnitTest
   }
   
   // uitl methods for calculation
-  public static int getValueSizeInOplog(Object value) {
+  private static int getValueSizeInOplog(Object value) {
     if (value instanceof String) {
       return getStrSizeInOplog((String)value);
     } else if (value instanceof byte[]) {
@@ -742,33 +740,40 @@ public class DiskOfflineCompactionJUnitTest
     }
     return -1;
   }
-  public static int getStrSizeInOplog(String str) {
+
+  private static int getStrSizeInOplog(String str) {
     // string saved in UTF format will use 3 bytes extra.
     // 4 is hard-coded overhead in Oplog for each string
     return str.length()+3+4;
   }
-  public static int getSize4Create(int extra_byte_num_per_entry, String key, Object value) {
+
+  static int getSize4Create(int extra_byte_num_per_entry, String key, Object value) {
     int createsize = 1 /* opcode */ + 1 /* userbits */ + versionsize + extra_byte_num_per_entry
       + getStrSizeInOplog(key) + 1 /* drid */ + getValueSizeInOplog(value) + 1 /* END_OF_RECORD_ID */;
     return createsize;
   }
-  public static int getSize4UpdateWithKey(int extra_byte_num_per_entry, String key, Object value) {
+
+  private static int getSize4UpdateWithKey(int extra_byte_num_per_entry, String key, Object value) {
     return getSize4UpdateWithoutKey(extra_byte_num_per_entry, value) + getStrSizeInOplog(key);
   }
-  public static int getSize4UpdateWithoutKey(int extra_byte_num_per_entry, Object value) {
+
+  private static int getSize4UpdateWithoutKey(int extra_byte_num_per_entry, Object value) {
     int updatesize = 1 /* opcode */ + 1 /* userbits */ + versionsize + extra_byte_num_per_entry
     + 1 /* drid */ + getValueSizeInOplog(value) +1 /* delta */ + 1 /* END_OF_RECORD_ID */;
     return updatesize;
   }
-  public static int getSize4TombstoneWithKey(int extra_byte_num_per_entry, String key) {
+
+  static int getSize4TombstoneWithKey(int extra_byte_num_per_entry, String key) {
     return getSize4TombstoneWithoutKey(extra_byte_num_per_entry) + getStrSizeInOplog(key);
   }
-  public static int getSize4TombstoneWithoutKey(int extra_byte_num_per_entry) {
+
+  private static int getSize4TombstoneWithoutKey(int extra_byte_num_per_entry) {
     int tombstonesize = 1 /* opcode */ + 1 /* userbits */ + versionsize + extra_byte_num_per_entry
     + 1 /* drid */ +1 /* delta */ + 1 /* END_OF_RECORD_ID */;
     return tombstonesize;
   }
-  public static int getRVVSize(int drMapSize, int[] numOfMemberPerDR, boolean gcRVV) {
+
+  static int getRVVSize(int drMapSize, int[] numOfMemberPerDR, boolean gcRVV) {
     // if there's one member in rvv, total size is 9 bytes: 
     // 0: OPLOG_RVV. 1: drMap.size()==1, 2: disRegionId, 3: getRVVTrusted 
     // 4: memberToVersion.size()==1, 5: memberid, 6-7: versionHolder 8: END_OF_RECORD_ID

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
index ccf6c87..1b78dae 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
@@ -16,6 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.util.Properties;
+import java.util.Set;
+
 import com.gemstone.gemfire.cache.*;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
@@ -24,6 +30,17 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskWriteAttributesFactory;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
 import java.io.File;
 import java.util.Properties;
 import java.util.Set;
@@ -34,29 +51,24 @@ import static org.junit.Assert.*;
 /**
  * Tests the old disk apis to make sure they do the correct thing.
  * Once we drop these old deprecated disk apis then this unit test can be removed.
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskOldAPIsJUnitTest
-{
+public class DiskOldAPIsJUnitTest {
 
   protected static Cache cache = null;
 
   protected static DistributedSystem ds = null;
-  protected static Properties props = new Properties();
 
-  static {
+  @Before
+  public void setUp() throws Exception {
+    Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
     props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
     props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
     props.setProperty(ENABLE_TIME_STATISTICS, "true");
     props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
-  }
 
-  @Before
-  public void setUp() throws Exception {
     cache = new CacheFactory(props).create();
     ds = cache.getDistributedSystem();
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
@@ -72,11 +84,12 @@ public class DiskOldAPIsJUnitTest
    * Make sure that if diskWriteAttributes sets sync then it shows up in the new apis.
    */
   @Test
-  public void testSyncBit() {
+  public void testSyncBit() throws Exception {
     doSyncBitTest(true);
     doSyncBitTest(false);
     doSyncBitTest(true);
   }
+
   private void doSyncBitTest(boolean destroyRegion) {
     DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
     dwaf.setSynchronous(true);
@@ -200,7 +213,7 @@ public class DiskOldAPIsJUnitTest
    * Note that the isSync bit is tested by another method.
    */
   @Test
-  public void testDWA_1() {
+  public void testDWA_1() throws Exception {
     DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
     dwaf.setMaxOplogSize(1);
     dwaf.setTimeInterval(333);
@@ -250,7 +263,7 @@ public class DiskOldAPIsJUnitTest
   }
   
   @Test
-  public void testDWA_2() {
+  public void testDWA_2() throws Exception {
     DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
     dwaf.setMaxOplogSize(2);
     dwaf.setTimeInterval(1);
@@ -300,7 +313,7 @@ public class DiskOldAPIsJUnitTest
    * Make sure the old diskDirs apis get mapped onto the diskStore.
    */
   @Test
-  public void testDiskDirs() {
+  public void testDiskDirs() throws Exception {
     File f1 = new File("testDiskDir1");
     f1.mkdir();
     File f2 = new File("testDiskDir2");
@@ -358,7 +371,7 @@ public class DiskOldAPIsJUnitTest
    * Make sure the old diskDirs apis get mapped onto the diskStore.
    */
   @Test
-  public void testDiskDirsAndSizes() {
+  public void testDiskDirsAndSizes() throws Exception {
     File f1 = new File("testDiskDir1");
     f1.mkdir();
     File f2 = new File("testDiskDir2");

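Besides the usual annotation changes, DiskOldAPIsJUnitTest above moves the connection properties out of a static initializer and into the @Before setUp() method, so every test builds a fresh Properties object instead of sharing mutable static state. A minimal sketch of that shape (the property keys and CacheFactory call are taken from the hunk; the class name is illustrative):

    import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;

    import java.util.Properties;

    import org.junit.Before;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;

    public class ExampleDiskApiJUnitTest {

      private static Cache cache;

      @Before
      public void setUp() throws Exception {
        // Built for each test run rather than once in a static block.
        Properties props = new Properties();
        props.setProperty(MCAST_PORT, "0");
        props.setProperty(LOCATORS, "");
        cache = new CacheFactory(props).create();
      }
    }
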
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
index 93e8731..43fe7b0 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRandomOperationsAndRecoveryJUnitTest.java
@@ -38,39 +38,29 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-/**
- * 
- *
- */
-@Category(IntegrationTest.class)
-public class DiskRandomOperationsAndRecoveryJUnitTest extends DiskRegionTestingBase
-{
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-  private static int ENTRY_SIZE = 1024;
-  final static byte[] valueBytes = new byte[ENTRY_SIZE];
+@Category(IntegrationTest.class)
+public class DiskRandomOperationsAndRecoveryJUnitTest extends DiskRegionTestingBase {
+
+  private static final int ENTRY_SIZE = 1024;
+
+  private static final byte[] valueBytes = new byte[ENTRY_SIZE];
   static {
     Arrays.fill(valueBytes, (byte)32);
   }
-  final static private Object value = new String(valueBytes);
 
-  // private static final boolean debug = false;
+  private static final Object value = new String(valueBytes);
 
-  private static int testId=0;
-  
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  private static int testId = 0;
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
+
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     testId++;
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
-
   private final static int ITERATIONS = 4;
   private final static long MAX_OPLOG_SIZE_IN_BYTES = 1024*16;
   /**

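DiskRandomOperationsAndRecoveryJUnitTest above also drops its setUp()/tearDown() overrides (and the easy-to-forget super calls) in favor of the postSetUp() hook that the base class invokes after its own setup, as the hunk shows. A rough sketch of that pattern, assuming only what is visible in the diff (the subclass name is illustrative):

    package com.gemstone.gemfire.internal.cache; // same package as the base class

    public class ExampleDiskRegionJUnitTest extends DiskRegionTestingBase {

      private final DiskRegionProperties diskProps = new DiskRegionProperties();

      // The base class runs its own setup and then calls this hook,
      // so there is no super.setUp()/super.tearDown() for a subclass to forget.
      @Override
      protected final void postSetUp() throws Exception {
        diskProps.setDiskDirs(dirs); // dirs comes from the test base class, as in the original test
      }
    }
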
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
index 84771b0..2fe634b 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
@@ -16,6 +16,15 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import org.junit.experimental.categories.Category;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
 import java.io.File;
 import java.util.Arrays;
 import java.util.Properties;
@@ -44,7 +53,8 @@ import com.gemstone.gemfire.test.dunit.VM;
  * 
  */
 
-public class DiskRegByteArrayDUnitTest extends CacheTestCase {
+@Category(DistributedTest.class)
+public class DiskRegByteArrayDUnitTest extends JUnit4CacheTestCase {
   static Cache cache;
   static Properties props = new Properties();
   static Properties propsWork = new Properties();
@@ -53,27 +63,26 @@ public class DiskRegByteArrayDUnitTest extends CacheTestCase {
   static CacheTransactionManager cacheTxnMgr;
   protected static File[] dirs = null;
   final static byte[] value = new byte[1024];
-  
-   
-    public DiskRegByteArrayDUnitTest(String name) {
-        super(name);
-        File file1 = new File( name + "1");
-        file1.mkdir();
-        file1.deleteOnExit();
-        File file2 = new File( name + "2");
-        file2.mkdir();
-        file2.deleteOnExit();
-        File file3 = new File( name + "3");
-        file3.mkdir();
-        file3.deleteOnExit();
-        File file4 = new File( name + "4");
-        file4.mkdir();
-        file4.deleteOnExit();
-        dirs = new File[4];
-        dirs[0] = file1;
-        dirs[1] = file2;
-        dirs[2] = file3;
-        dirs[3] = file4;
+
+    public DiskRegByteArrayDUnitTest() {
+      super();
+      File file1 = new File( getTestMethodName() + "1");
+      file1.mkdir();
+      file1.deleteOnExit();
+      File file2 = new File( getTestMethodName() + "2");
+      file2.mkdir();
+      file2.deleteOnExit();
+      File file3 = new File( getTestMethodName() + "3");
+      file3.mkdir();
+      file3.deleteOnExit();
+      File file4 = new File( getTestMethodName() + "4");
+      file4.mkdir();
+      file4.deleteOnExit();
+      dirs = new File[4];
+      dirs[0] = file1;
+      dirs[1] = file2;
+      dirs[2] = file3;
+      dirs[3] = file4;
     }
 
     @Override
@@ -103,7 +112,7 @@ public class DiskRegByteArrayDUnitTest extends CacheTestCase {
     
     public static void createCacheForVM0(){
         try{
-            ds = (new DiskRegByteArrayDUnitTest("vm0_diskReg")).getSystem(props);
+            ds = (new DiskRegByteArrayDUnitTest()).getSystem(props);
             cache = CacheFactory.create(ds);
             AttributesFactory factory  = new AttributesFactory();
             factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -124,7 +133,7 @@ public class DiskRegByteArrayDUnitTest extends CacheTestCase {
     public static void createCacheForVM1(){
         try{
             
-            ds = (new DiskRegByteArrayDUnitTest("vm1_diskReg")).getSystem(props);
+            ds = (new DiskRegByteArrayDUnitTest()).getSystem(props);
             cache = CacheFactory.create(ds);
             AttributesFactory factory  = new AttributesFactory();
             factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -154,7 +163,8 @@ public class DiskRegByteArrayDUnitTest extends CacheTestCase {
     
     //test methods
  
-    public void testPutGetByteArray(){
+  @Test
+  public void testPutGetByteArray(){
         
         Host host = Host.getHost(0);
         VM vm0 = host.getVM(0);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
index 840cb67..3849be3 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
@@ -16,23 +16,28 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.*;
-import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import com.gemstone.gemfire.util.test.TestUtil;
-import org.junit.After;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.SystemFailure;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.CacheTransactionManager;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+import com.gemstone.gemfire.util.test.TestUtil;
 
 /**
  * This test is for testing Disk attributes set via xml.
@@ -43,16 +48,15 @@ import static org.junit.Assert.fail;
  * @since GemFire 5.1
  */
 @Category(IntegrationTest.class)
-public class DiskRegCacheXmlJUnitTest
-{
-  Cache cache = null;
+public class DiskRegCacheXmlJUnitTest {
 
-  DistributedSystem ds = null;
+  private Cache cache = null;
 
-  protected static File[] dirs = null;
+  private DistributedSystem ds = null;
 
-  public void mkDirAndConnectDs()
-  {
+  private static File[] dirs = null;
+
+  private void mkDirAndConnectDs() {
     File file1 = new File("d1");
     file1.mkdir();
     file1.deleteOnExit();
@@ -73,19 +77,12 @@ public class DiskRegCacheXmlJUnitTest
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(CACHE_XML_FILE, path);
     ds = DistributedSystem.connect(props);
-    try {
-      // Create the cache which causes the cache-xml-file to be parsed
-      cache = CacheFactory.create(ds);
-    }
-    catch (Exception ex) {
-      ds.getLogWriter().error("Exception occured",ex);
-      fail("failed to create cache due to "+ex);
-    }
+    // Create the cache which causes the cache-xml-file to be parsed
+    cache = CacheFactory.create(ds);
   }
 
   @Test
-  public void testDiskRegCacheXml()
-  {
+  public void testDiskRegCacheXml() throws Exception {
     mkDirAndConnectDs();
     // Get the region1 which is a subregion of /root
     Region region1 = cache.getRegion("/root1/PersistSynchRollingOplog1");
@@ -218,30 +215,21 @@ public class DiskRegCacheXmlJUnitTest
     deleteFiles();
   }
 
-  private static void deleteFiles()
-  {
+  private static void deleteFiles() {
     for (int i = 0; i < dirs.length; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
         files[j].delete();
       }
     }
-
   }
-  
-  
-  @After
-  public void tearDown() throws Exception
-  {
 
-    /*if (cache != null && !cache.isClosed()) {
-      cache.close();
-    }*/
+  @After
+  public void tearDown() throws Exception {
     try {
       if (cache != null && !cache.isClosed()) {
         for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
           Region root = (Region)itr.next();
-//          String name = root.getName();
 					if(root.isDestroyed() || root instanceof HARegion) {
             continue;
         	}
@@ -259,21 +247,10 @@ public class DiskRegCacheXmlJUnitTest
       }
     }
     finally {
-      try {
-        closeCache();
-      }
-      catch (VirtualMachineError e) {
-        SystemFailure.initiateFailure(e);
-        throw e;
-      }
-      catch (Throwable t) {
-        ds.getLogWriter().error("Error in closing the cache ", t);
-        
-      }
+      closeCache();
     }
   }
-  
-  
+
   /** Close the cache */
   private  synchronized final void closeCache() {
     if (cache != null) {
@@ -294,4 +271,3 @@ public class DiskRegCacheXmlJUnitTest
     }
   }
 }// end of DiskRegCacheXmlJUnitTest
-

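A second cleanup visible in DiskRegCacheXmlJUnitTest above (and repeated in the generator test that follows): try/catch blocks that only logged and called fail() are deleted, and the method declares throws Exception instead, so a failure surfaces with its original stack trace. A brief sketch of the cleaned-up form (the cache-xml file name is illustrative; the property keys and calls are from the hunk):

    import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;

    import java.util.Properties;

    import org.junit.Test;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;
    import com.gemstone.gemfire.distributed.DistributedSystem;

    public class ExampleCacheXmlJUnitTest {

      private DistributedSystem ds;
      private Cache cache;

      @Test
      public void testRegionsFromCacheXml() throws Exception {
        Properties props = new Properties();
        props.setProperty(MCAST_PORT, "0");
        props.setProperty(CACHE_XML_FILE, "example-cache.xml"); // illustrative file name
        ds = DistributedSystem.connect(props);
        // No try/catch + fail(): if parsing the XML fails, JUnit reports the real cause.
        cache = CacheFactory.create(ds);
      }
    }
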
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
index 76eaab8..591046f 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
@@ -16,46 +16,38 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
 
 import java.io.File;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.DistributedSystemConfigProperties.*;
-import static org.junit.Assert.fail;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * This test is for testing Disk attributes set programmatically
  * The generated cacheXml is used to create a cache and teh region
  * properties retested.
- * 
  */
 @Category(IntegrationTest.class)
-public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
-{
-  PrintWriter pw;
-
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties[] diskRegionProperties = new DiskRegionProperties[12];
+  private DiskRegionProperties[] diskRegionProperties = new DiskRegionProperties[12];
 
-  Region[] regions = new Region[12];
+  private Region[] regions = new Region[12];
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskDirSize = new int[4];
     diskDirSize[0] = Integer.MAX_VALUE;
     diskDirSize[1] = Integer.MAX_VALUE;
@@ -74,54 +66,46 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     }
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  public void createCacheXML()
-  {
+  public void createCacheXML() throws IOException {
     // create the regions[0] which is SyncPersistOnly and set DiskWriteAttibutes
     diskRegionProperties[0].setRolling(true);
     diskRegionProperties[0].setMaxOplogSize(1073741824L);
     diskRegionProperties[0].setRegionName("regions1");
     regions[0] = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
-        diskRegionProperties[0], Scope.LOCAL);
+      diskRegionProperties[0], Scope.LOCAL);
 
     // create the regions[1] which is SyncPersistOnly and set DiskWriteAttibutes
 
     diskRegionProperties[1].setRolling(false);
     diskRegionProperties[1].setRegionName("regions2");
     regions[1] = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
-        diskRegionProperties[1], Scope.LOCAL);
+      diskRegionProperties[1], Scope.LOCAL);
 
     // create the regions[2] which AsyncPersistOnly, No buffer and Rolling oplog
     diskRegionProperties[2].setRolling(true);
     diskRegionProperties[2].setMaxOplogSize(1073741824L);
     diskRegionProperties[2].setRegionName("regions3");
     regions[2] = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
-        diskRegionProperties[2]);
+      diskRegionProperties[2]);
 
     // create the regions[3] which is AsynchPersistonly, No buffer and fixed oplog
     diskRegionProperties[3].setRolling(false);
     diskRegionProperties[3].setRegionName("regions4");
     regions[3] = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
-        diskRegionProperties[3]);
+      diskRegionProperties[3]);
 
     // create the regions[4] which is SynchOverflowOnly, Rolling oplog
     diskRegionProperties[4].setRolling(true);
     diskRegionProperties[4].setMaxOplogSize(1073741824L);
     diskRegionProperties[4].setRegionName("regions5");
     regions[4] = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
-        diskRegionProperties[4]);
+      diskRegionProperties[4]);
 
     // create the regions[5] which is SyncOverflowOnly, Fixed oplog
     diskRegionProperties[5].setRolling(false);
     diskRegionProperties[5].setRegionName("regions6");
     regions[5] = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
-        diskRegionProperties[5]);
+      diskRegionProperties[5]);
 
     // create the regions[6] which is AsyncOverflow, with Buffer and rolling oplog
     diskRegionProperties[6].setRolling(true);
@@ -130,7 +114,7 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskRegionProperties[6].setTimeInterval(15l);
     diskRegionProperties[6].setRegionName("regions7");
     regions[6] = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
-        diskRegionProperties[6]);
+      diskRegionProperties[6]);
 
     // create the regions[7] which is AsyncOverflow ,Time base buffer-zero byte
     // buffer
@@ -140,20 +124,20 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskRegionProperties[7].setBytesThreshold(0l);
     diskRegionProperties[7].setRegionName("regions8");
     regions[7] = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
-        diskRegionProperties[7]);
+      diskRegionProperties[7]);
 
     // create the regions[8] which is SyncPersistOverflow, Rolling oplog
     diskRegionProperties[8].setRolling(true);
     diskRegionProperties[8].setMaxOplogSize(1073741824L);
     diskRegionProperties[8].setRegionName("regions9");
     regions[8] = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
-        diskRegionProperties[8]);
+      diskRegionProperties[8]);
 
     // create the regions[9] which is Sync PersistOverflow, fixed oplog
     diskRegionProperties[9].setRolling(false);
     diskRegionProperties[9].setRegionName("regions10");
     regions[9] = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
-        diskRegionProperties[9]);
+      diskRegionProperties[9]);
     // create the regions[10] which is Async Overflow Persist ,with buffer and
     // rollong
     // oplog
@@ -163,7 +147,7 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskRegionProperties[10].setTimeInterval(15l);
     diskRegionProperties[10].setRegionName("regions11");
     regions[10] = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
-        diskRegionProperties[10]);
+      diskRegionProperties[10]);
 
     // create the regions[11] which is Async Persist Overflow with time based
     // buffer
@@ -173,73 +157,63 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskRegionProperties[11].setTimeInterval(15l);
     diskRegionProperties[11].setRegionName("regions12");
     regions[11] = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
-        diskRegionProperties[11]);
-
+      diskRegionProperties[11]);
 
     //cacheXmlGenerator: generates cacheXml file
-    try {
-      FileWriter fw = new FileWriter(new File("DiskRegCachexmlGeneratorJUnitTest.xml"));
-      PrintWriter pw = new PrintWriter(fw);
-      CacheXmlGenerator.generate(cache, pw);
-    }
-    catch (Exception ex) {
-      logWriter.error("Exception occured",ex);
-      fail("FAILED While cache xml generation");
-    }
-
+    FileWriter fw = new FileWriter(new File(getClass().getSimpleName() + ".xml"));
+    PrintWriter pw = new PrintWriter(fw);
+    CacheXmlGenerator.generate(cache, pw);
   }
-  
+
   @Test
-  public void testVerifyCacheXml() throws Exception
-  {
+  public void testVerifyCacheXml() throws Exception {
     createCacheXML();
     ds.disconnect();
     // Connect to the GemFire distributed system
     Properties props = new Properties();
-    props.setProperty(NAME, "DiskRegCachexmlGeneratorJUnitTest");
+    props.setProperty(NAME, getClass().getSimpleName());
     props.setProperty(MCAST_PORT, "0");
-    String path = "DiskRegCachexmlGeneratorJUnitTest.xml";
+    String path = getClass().getSimpleName() + ".xml";
     props.setProperty(CACHE_XML_FILE, path);
     ds = DistributedSystem.connect(props);
     // Create the cache which causes the cache-xml-file to be parsed
     cache = CacheFactory.create(ds);
 
-    // Get the regions[0] 
+    // Get the regions[0]
     verify((LocalRegion) cache.getRegion("regions1"), diskRegionProperties[0]);
 
     // Get the regions[1]
     verify((LocalRegion) cache.getRegion("regions2"), diskRegionProperties[1]);
 
-    // Get the regions[2] 
+    // Get the regions[2]
     verify((LocalRegion) cache.getRegion("regions3"), diskRegionProperties[2]);
 
-    // Get the regions[3] 
+    // Get the regions[3]
     verify((LocalRegion) cache.getRegion("regions4"), diskRegionProperties[3]);
-    
-    // Get the regions[4] 
+
+    // Get the regions[4]
     verify((LocalRegion) cache.getRegion("regions5"), diskRegionProperties[4]);
 
-    // Get the regions[5] 
+    // Get the regions[5]
     verify((LocalRegion) cache.getRegion("regions6"), diskRegionProperties[5]);
-    
-    // Get the regions[6] 
+
+    // Get the regions[6]
     verify((LocalRegion) cache.getRegion("regions7"), diskRegionProperties[6]);
 
-    // Get the regions[7] 
+    // Get the regions[7]
     verify((LocalRegion) cache.getRegion("regions8"), diskRegionProperties[7]);
 
-    // Get the regions[8] 
+    // Get the regions[8]
     verify((LocalRegion) cache.getRegion("regions9"), diskRegionProperties[8]);
 
-    // Get the regions[9] 
+    // Get the regions[9]
     verify((LocalRegion) cache.getRegion("regions10"), diskRegionProperties[9]);
 
     // Get the regions[10]
     verify((LocalRegion) cache.getRegion("regions11"), diskRegionProperties[10]);
 
-    // Get the regions[11] 
+    // Get the regions[11]
     verify((LocalRegion) cache.getRegion("regions12"), diskRegionProperties[11]);
   }
 
-}// end of DiskRegCachexmlGeneratorJUnitTest
-
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
index 34d540d..75701e1 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCbkChkJUnitTest.java
@@ -16,10 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -30,30 +28,16 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- *  * Tests if callbacks are getting invoked correctly 
- *   * for 'create', 'update' and 'destroy' of disk region entries
- *    * with concurrent 'clear' 
- *      *
+ * Tests if callbacks are getting invoked correctly
+ * for 'create', 'update' and 'destroy' of disk region entries
+ * with concurrent 'clear'
  */
 @Category(IntegrationTest.class)
-public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase 
-{
+public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase {
 
-  volatile static boolean intoCreateAfterCbk = false;
-  volatile static boolean intoUpdateAfterCbk = false;
-  volatile static boolean intoDestroyAfterCbk = false;
-  
-  @Before
-  public void setUp() throws Exception
-  {  
-    super.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+  private static volatile boolean intoCreateAfterCbk = false;
+  private static volatile boolean intoUpdateAfterCbk = false;
+  private static volatile boolean intoDestroyAfterCbk = false;
   
   private DiskRegionProperties getDiskRegionProperties(){
     DiskRegionProperties diskProperties = new DiskRegionProperties();
@@ -64,8 +48,7 @@ public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase
   }
     
   @Test
-  public void testAfterCallbacks()
-  {
+  public void testAfterCallbacks() {
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
       getDiskRegionProperties(), Scope.LOCAL);
 
@@ -112,6 +95,5 @@ public class DiskRegCbkChkJUnitTest extends DiskRegionTestingBase
     region.create("key3", "createValue");
     region.destroy("key3");
     assertTrue("Destroy callback not called", intoDestroyAfterCbk);
-	
-  }  
+  }
 }
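The hunks above show the conversion shape used throughout this commit: @Before/@After overrides that only delegated to super are dropped, the class carries a test category annotation, and assertions come from a static org.junit.Assert import rather than JUnit3's inherited methods. A minimal sketch of that shape follows; the class name and test body are illustrative assumptions, while DiskRegionTestingBase, IntegrationTest and the annotations are taken from the hunks themselves.

// Illustrative sketch only -- not part of this commit.
import static org.junit.Assert.*;

import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.gemstone.gemfire.test.junit.categories.IntegrationTest;

@Category(IntegrationTest.class)
public class ExampleConvertedJUnitTest extends DiskRegionTestingBase {

  // No setUp()/tearDown() overrides that merely call super; the base class
  // lifecycle runs on its own under JUnit4.

  @Test
  public void exampleAssertion() {
    assertTrue("example assertion should hold", true);
  }
}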

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
index ba3af6a..99ab7e6 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegOplogSwtchingAndRollerJUnitTest.java
@@ -16,16 +16,11 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -39,44 +34,31 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Miscellaneous disk tests
- * 
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOplogSwtchingAndRollerJUnitTest extends
-    DiskRegionTestingBase
-{
+public class DiskRegOplogSwtchingAndRollerJUnitTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private static File[] dirs1 = null;
 
-  protected boolean encounteredException = false;
+  private static int[] diskDirSize1 = null;
 
-  protected volatile boolean hasBeenNotified = false;
+  private volatile boolean hasBeenNotified = false;
 
-  protected static File[] dirs1 = null;
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  protected static int[] diskDirSize1 = null;
+  private boolean encounteredException = false;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
-  }
+  private Object forWaitNotify = new Object();
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+  private boolean gotNotification = false;
+
+  private Object getValOnDsk = null;
 
   /**
-   * tests non occurence of DiskAccessException
-   *  
+   * tests non occurrence of DiskAccessException
    */
   @Test
-  public void testSyncPersistRegionDAExp()
-  {
+  public void testSyncPersistRegionDAExp() {
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
     testingDirectory1.deleteOnExit();
@@ -114,8 +96,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }// end of testSyncPersistRegionDAExp
 
   @Test
-  public void testAsyncPersistRegionDAExp()
-  {
+  public void testAsyncPersistRegionDAExp() {
     File testingDirectory1 = new File("testingDirectory1");
     testingDirectory1.mkdir();
     testingDirectory1.deleteOnExit();
@@ -151,8 +132,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
   }// end of testAsyncPersistRegionDAExp
 
-  private void diskAccessExpHelpermethod(final Region region)
-  {
+  private void diskAccessExpHelpermethod(final Region region) {
     final byte[] value = new byte[990];
     Arrays.fill(value, (byte)77);
     try {
@@ -172,18 +152,11 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     }
   }
 
-  protected Object forWaitNotify = new Object();
-
-  protected boolean gotNotification = false;
-
   /**
    * DiskRegionRollingJUnitTest :
-   * 
-   *  
    */
   @Test
-  public void testSyncRollingHappening()
-  {
+  public void testSyncRollingHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -235,8 +208,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testSyncRollingNotHappening()
-  {
+  public void testSyncRollingNotHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -277,8 +249,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testAsyncRollingHappening()
-  {
+  public void testAsyncRollingHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -334,8 +305,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   }
 
   @Test
-  public void testAsyncRollingNotHappening()
-  {
+  public void testAsyncRollingNotHappening() {
     try {
       DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
       diskRegionProperties.setDiskDirs(dirs);
@@ -374,8 +344,6 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
   }
 
-  protected Object getValOnDsk = null;
-
   /**
    * DiskRegOplog1OverridingOplog2JUnitTest: Disk Region test : oplog1 flush
    * overriding oplog2 flush
@@ -383,11 +351,9 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * This test will hold the flush of oplog1 and flush oplog2 before it. After
    * that oplog1 is allowed to flush. A get of an entry which was first put in
    * oplog1 and then in oplog2 should result in the get being done from oplog2.
-   *  
    */
   @Test
-  public void testOplog1FlushOverridingOplog2Flush()
-  {
+  public void testOplog1FlushOverridingOplog2Flush() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -468,8 +434,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
 
   }// end of testOplog1FlushOverridingOplog2Flush
 
-  class DoesFlush implements Runnable
-  {
+  private class DoesFlush implements Runnable {
 
     private Region region;
 
@@ -477,15 +442,14 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       ((LocalRegion)region).getDiskRegion().flushForTesting();
       synchronized (region) {
         region.notify();
         hasBeenNotified = true;
       }
     }
-
   }
 
   /**
@@ -493,8 +457,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * the time rolling has started , the entry exists in the current oplog
    */
   @Test
-  public void testEntryExistsinCurrentOplog()
-  {
+  public void testEntryExistsinCurrentOplog() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -585,11 +548,9 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
   /**
    * Entries deleted in current Oplog are recorded correctly during the rolling
    * of that oplog
-   *  
    */
   @Test
-  public void testEntryDeletedinCurrentOplog()
-  {
+  public void testEntryDeletedinCurrentOplog() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -690,28 +651,15 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    // region.close();
   }// end of testEntryDeletedinCurrentOplog
 
-
-  /**
-   * 
-   * @param region
-   *          get LRU statistics
-   */
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
   }
 
   /**
    * to validate the get operation performed on a byte array.
-   * 
-   * @param key
-   * @param region
-   * @return
    */
-
-  private boolean getByteArrVal(Long key, Region region)
-  {
+  private boolean getByteArrVal(Long key, Region region) {
     Object val = null;
     byte[] val2 = new byte[1024];
     Arrays.fill(val2, (byte)77);
@@ -742,12 +690,10 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * Roller should wait for asynch writer to terminate if asynch flush is going
    * on , before deleting the oplog
    */
-  protected boolean afterWritingBytes = false;
+  private boolean afterWritingBytes = false;
 
   @Test
-  public void testOplogRollerWaitingForAsyncWriter()
-  {
-
+  public void testOplogRollerWaitingForAsyncWriter() {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
@@ -838,8 +784,7 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
 
   }// end of testOplogRollerWaitingForAsyncWriter
 
-  class DoesFlush1 implements Runnable
-  {
+  private class DoesFlush1 implements Runnable {
 
     private Region region;
 
@@ -847,27 +792,22 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
       this.region = region;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       ((LocalRegion)region).getDiskRegion().flushForTesting();
       synchronized (region) {
         region.notify();
         hasBeenNotified = true;
       }
     }
-
   }
 
   /**
    * Task 125: Ensuring that retrieval of evicted entry data for rolling
    * purposes is correct & does not cause any eviction sort of things
-   * 
-   * @throws EntryNotFoundException
-   *  
    */
   @Test
-  public void testGetEvictedEntry() throws EntryNotFoundException
-  {
+  public void testGetEvictedEntry() throws EntryNotFoundException {
     hasBeenNotified = false;
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(false);
@@ -969,10 +909,8 @@ public class DiskRegOplogSwtchingAndRollerJUnitTest extends
    * DiskAccessException doesn't occur even when amount of put data exceeds the
    * max dir sizes.
    */
-
   @Test
-  public void testDiskFullExcep()
-  {
+  public void testDiskFullExcep() {
     boolean exceptionOccured = false;
     int[] diskDirSize1 = new int[4];
     diskDirSize1[0] = 1048576;
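The DoesFlush and DoesFlush1 runnables in the hunks above hand control back to the test thread by calling region.notify() and setting the volatile hasBeenNotified flag once flushForTesting() has run. The waiting side is not visible in these hunks; the self-contained sketch below illustrates the same wait/notify handshake with generic names, and the bounded wait is an assumption rather than something taken from the commit.

// Generic illustration of the wait/notify handshake used by the flush
// runnables above; none of these names come from the commit itself.
public class FlushHandshakeExample {

  private final Object monitor = new Object();
  private volatile boolean hasBeenNotified = false;

  public void runExample() throws InterruptedException {
    Thread worker = new Thread(() -> {
      // stand-in for the flushForTesting() call made by DoesFlush.run()
      synchronized (monitor) {
        hasBeenNotified = true;
        monitor.notify();
      }
    });
    worker.start();
    synchronized (monitor) {
      while (!hasBeenNotified) {
        monitor.wait(10000); // bounded wait so a missed notify cannot hang the test
      }
    }
  }
}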

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
index 4e3beaa..792f07e 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegRecoveryJUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.File;
@@ -25,13 +27,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.Region;
@@ -45,33 +43,24 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * @since GemFire 5.1
  */
 @Category(IntegrationTest.class)
-public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
-{
-  DiskRegionProperties diskProps = new DiskRegionProperties();
-  
-  private static int EMPTY_RVV_SIZE = 6;
-
-//  private static final boolean debug = false;
-
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
-    diskProps.setDiskDirs(dirs);
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
+public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase {
 
+  private static int EMPTY_RVV_SIZE = 6;
   private static int ENTRY_SIZE = 1024;
 
   private static boolean oplogsIDsNotifiedToRoll;
+
+  private boolean proceedWithRolling;
+  private boolean rollingDone;
+  private boolean verifiedOplogs;
+  private final Object verifiedSync = new Object();
+
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
   
-  boolean proceedWithRolling, rollingDone, verifiedOplogs;
-  final Object verifiedSync = new Object();
+  @Override
+  protected final void postSetUp() throws Exception {
+    diskProps.setDiskDirs(dirs);
+  }
 
   /**
    * Disk region recovery test for Persist only with sync writes. Test has four
@@ -82,11 +71,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * 1. Get and verify the entries put in STEP 1 and STEP 2. STEP 4: Create
    * cache. Create Region with the same name as that of in STEP 1. Get and
    * verify the entries put in STEP 1 and STEP 2.
-   * 
    */
   @Test
-  public void testDiskRegRecovery()
-  {
+  public void testDiskRegRecovery() {
     /**
      * STEP 1
      */
@@ -310,21 +297,18 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     getByteArrValZeroLnth("119", region);
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
 
-/**
+  /**
    * Disk region recovery test for Persist only with sync writes. Test has four
    * steps : STEP 1: Create cache. Create Region. Put entries. Close cache. STEP
    * 2: Create cache. Create Region with the same name as that of in STEP 1. Delete some entries.
    * Close the Cache   * 
    * 3: Again Create cache. Create Region with the same name as that of in STEP
    * 4) Verify that the entries got deleted
-   * 
    */
   @Test
-  public void testBug39989_1()
-  {
+  public void testBug39989_1() {
     /**
      * STEP 1
      */
@@ -409,9 +393,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     }   
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
-  
 
   /**
    * Disk region recovery test for Persist only with sync writes. Test has four
@@ -421,11 +403,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * Close the Cache   * 
    * 3: Again Create cache.
    * 4) check if the region creation is successful
-   * 
    */
   @Test
-  public void testBug39989_2()
-  {
+  public void testBug39989_2() {
     /**
      * STEP 1
      */
@@ -516,16 +496,12 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     }   
 
     closeDown();  // closes disk file which will flush all buffers
-
   }
 
   /**
    * To validate the get operation performed on a byte array.
-   *  
    */
-
-  private void getByteArrVal(String key, Region region)
-  {
+  private void getByteArrVal(String key, Region region) {
     byte[] val = (byte[])region.get(key);
     //verify that the retrieved byte[] equals to the value put initially.
     // val should be an unitialized array of bytes of length 1024
@@ -537,10 +513,8 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
 
   /**
    * to validate the get operation performed on a byte array of length zero
-   *  
    */
-  private boolean getByteArrValZeroLnth(String key, Region region)
-  {
+  private boolean getByteArrValZeroLnth(String key, Region region) {
     Object val0 = null;
     byte[] val2 = new byte[0];
     try {
@@ -567,14 +541,12 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
     return result;
   }
 
-  public void verifyOplogSizeZeroAfterRecovery(Region region)
-  {
+  private void verifyOplogSizeZeroAfterRecovery(Region region) {
     assertEquals(Oplog.OPLOG_MAGIC_SEQ_REC_SIZE*2 + Oplog.OPLOG_DISK_STORE_REC_SIZE*2 + EMPTY_RVV_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE*2, ((LocalRegion)region).getDiskRegion().testHook_getChild().getOplogSize());
   }
 
   @Test
-  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesTrue()
-  {
+  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesTrue() {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -628,8 +600,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesFalse()
-  {
+  public void testNoEvictionDuringRecoveryIfNoGIIRecoverValuesFalse() {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -688,8 +659,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testEmptyRegionRecover()
-  {
+  public void testEmptyRegionRecover() {
     diskProps.setDiskDirs(dirs);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
     Assert.assertTrue(region.size() == 0);
@@ -714,8 +684,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testReadCorruptedFile()
-  {
+  public void testReadCorruptedFile() {
     diskProps.setDiskDirs(dirs);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
 
@@ -765,8 +734,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testForceCompactionForRegionWithRollingDisabled()
-      throws Exception {
+  public void testForceCompactionForRegionWithRollingDisabled() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setMaxOplogSize(2048+(18*2)+15*7);
     diskProps.setRolling(false);
@@ -837,12 +805,9 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
    * leaves it in created set & so when the compactor processes the created Set it 
    * thinks that the entry is now referenced in the any of the subsequent oplogs & thus
    * overwrites it with a byte[].
-   * @throws Exception
    */
-  
   @Test
-  public void testVestigialCreatesInOplog() throws Exception
-  {
+  public void testVestigialCreatesInOplog() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setMaxOplogSize(40);
     diskProps.setPersistBackup(true);
@@ -907,9 +872,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOnlyRecoverValuesTrue()
-      throws Exception
-  {
+  public void testDiskIDFieldsForPersistOnlyRecoverValuesTrue() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -985,13 +948,10 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
         System.clearProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
       }
     }
-
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOverFlowRecoverValuesTrue()
-      throws Exception
-  {
+  public void testDiskIDFieldsForPersistOverFlowRecoverValuesTrue() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     try {
@@ -1067,8 +1027,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
   
   @Test
-  public void testDiskIDFieldsForPersistOnlyRecoverValuesFalse()
-      throws Exception {
+  public void testDiskIDFieldsForPersistOnlyRecoverValuesFalse() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -1145,8 +1104,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
   }
  
   @Test
-  public void testDiskIDFieldsForPersistOverFlowRecoverValuesFalse()
-      throws Exception {
+  public void testDiskIDFieldsForPersistOverFlowRecoverValuesFalse() throws Exception {
     String oldValue = System.getProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
     try {
@@ -1224,7 +1182,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
 
   @Test
   public void testBug40375() throws Exception {
-      try {
+    try {
       diskProps.setDiskDirs(dirs);
       diskProps.setPersistBackup(true);
       diskProps.setSynchronous(true);
@@ -1249,66 +1207,65 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase
       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
       assertEquals(4, region.size());
       region.close();
-      }finally {
-
+    } finally {
       System.setProperty(DiskStoreImpl.COMPLETE_COMPACTION_BEFORE_TERMINATION_PROPERTY_NAME,"");
-        }
+    }
   }
      
   @Test
   public void testBug41340() throws Exception {
-       diskProps.setDiskDirs(dirs);
-       diskProps.setPersistBackup(true);
-       diskProps.setSynchronous(true);
-       diskProps.setRolling(true);
-       diskProps.setRegionName("testBug41340");
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       assertEquals(0, region.size());
-       //put some entries
-       region.put("0","0");
-       region.put("1","1");
-       region.put("2","2");
-       region.put("3","3");
-       
-       
-       //Create another oplog
-       DiskStore store = cache.findDiskStore(region.getAttributes().getDiskStoreName());
-       store.forceRoll();
-       
-       //Now create and destroy all of the entries in the new
-       //oplog. This should cause us to remove the CRF but leave
-       //the DRF, which has creates in reverse order. Now we have
-       //garbage destroys which have higher IDs than any crate
-       region.put("4","1");
-       region.put("5","2");
-       region.put("6","3");
-       region.destroy("0");
-       region.destroy("6");
-       region.destroy("5");
-       region.destroy("4");
-       
-       store.forceRoll();
-       
-       //Force a recovery
-       GemFireCacheImpl.getInstance().close();
-       cache = createCache();
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       assertEquals(3, region.size());
-       
-       //With bug 41340, this is reusing an oplog id.
-       region.put("7","7");
+     diskProps.setDiskDirs(dirs);
+     diskProps.setPersistBackup(true);
+     diskProps.setSynchronous(true);
+     diskProps.setRolling(true);
+     diskProps.setRegionName("testBug41340");
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+     assertEquals(0, region.size());
+     //put some entries
+     region.put("0","0");
+     region.put("1","1");
+     region.put("2","2");
+     region.put("3","3");
+
+
+     //Create another oplog
+     DiskStore store = cache.findDiskStore(region.getAttributes().getDiskStoreName());
+     store.forceRoll();
+
+     //Now create and destroy all of the entries in the new
+     //oplog. This should cause us to remove the CRF but leave
+     //the DRF, which has creates in reverse order. Now we have
+     //garbage destroys which have higher IDs than any create
+     region.put("4","1");
+     region.put("5","2");
+     region.put("6","3");
+     region.destroy("0");
+     region.destroy("6");
+     region.destroy("5");
+     region.destroy("4");
+
+     store.forceRoll();
+
+     //Force a recovery
+     GemFireCacheImpl.getInstance().close();
+     cache = createCache();
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+     assertEquals(3, region.size());
+
+     //With bug 41340, this is reusing an oplog id.
+     region.put("7","7");
 //       region.close();
-       
-       //Force another recovery
-       GemFireCacheImpl.getInstance().close();
-       cache = createCache();
-       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
-       
-       //Make sure we didn't lose the entry
-       assertEquals(4, region.size());
-       assertEquals("7", region.get("7"));
-       region.close();
-     }
+
+     //Force another recovery
+     GemFireCacheImpl.getInstance().close();
+     cache = createCache();
+     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
+
+     //Make sure we didn't lose the entry
+     assertEquals(4, region.size());
+     assertEquals("7", region.get("7"));
+     region.close();
+   }
   
   @Test
   public void testRecoverValuesFalse() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
index 350a588..84c8500 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionAsyncRecoveryJUnitTest.java
@@ -16,10 +16,7 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -28,7 +25,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -37,15 +33,11 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.internal.FileUtil;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
-/**
- *
- */
 @Category(IntegrationTest.class)
 public class DiskRegionAsyncRecoveryJUnitTest extends DiskRegionTestingBase {
 
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     DiskStoreObserver.setInstance(null);
     System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
     System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");
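Both recovery test conversions above replace lifecycle overrides with the template-method hooks of DiskRegionTestingBase: per-test initialization moves into postSetUp() and cleanup into postTearDown(), so the base class keeps control of the setUp/tearDown ordering itself. A minimal sketch of that shape follows; the class name and hook bodies are illustrative, while the hook signatures, DiskRegionProperties, dirs and the DiskStoreImpl property constant appear in the hunks above.

// Illustrative sketch only -- mirrors the lifecycle hooks used above.
@Category(IntegrationTest.class)
public class ExampleRecoveryJUnitTest extends DiskRegionTestingBase {

  private DiskRegionProperties diskProps = new DiskRegionProperties();

  @Override
  protected final void postSetUp() throws Exception {
    // initialization that previously ran in setUp() after super.setUp()
    diskProps.setDiskDirs(dirs);
  }

  @Override
  protected final void postTearDown() throws Exception {
    // cleanup that previously ran in tearDown() alongside super.tearDown()
    System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
  }
}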

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b914df23/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
index 07bd273..51352d1 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionChangingRegionAttributesJUnitTest.java
@@ -16,11 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -32,47 +29,33 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * if the the region attributes are changed after starting it again.
  * 
  * The behaviour should be predictable
- * 
- *
  */
 @Category(IntegrationTest.class)
-public class DiskRegionChangingRegionAttributesJUnitTest extends
-    DiskRegionTestingBase
-{
+public class DiskRegionChangingRegionAttributesJUnitTest extends DiskRegionTestingBase {
+
+  private DiskRegionProperties props;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     props = new DiskRegionProperties();
     props.setDiskDirs(dirs);
-    
-  }
-
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
   }
-  
 
-  private DiskRegionProperties props;
-  
-  private void createOverflowOnly(){
+  private void createOverflowOnly() {
     props.setOverFlowCapacity(1);
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,props);
   }
   
-  private void createPersistOnly(){
+  private void createPersistOnly() {
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,props, Scope.LOCAL);
   }
   
-  private void createPersistAndOverflow(){
+  private void createPersistAndOverflow() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,props); 
   }
   
   @Test
-  public void testOverflowOnlyAndThenPersistOnly(){
+  public void testOverflowOnlyAndThenPersistOnly() {
     createOverflowOnly();
     put100Int();
     region.close();
@@ -81,7 +64,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistOnlyAndThenOverflowOnly(){
+  public void testPersistOnlyAndThenOverflowOnly() {
     createPersistOnly();
     put100Int();
     region.close();
@@ -97,7 +80,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testOverflowOnlyAndThenPeristAndOverflow(){
+  public void testOverflowOnlyAndThenPeristAndOverflow() {
     createOverflowOnly();
     put100Int();
     region.close();
@@ -106,7 +89,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistAndOverflowAndThenOverflowOnly(){
+  public void testPersistAndOverflowAndThenOverflowOnly() {
     createPersistAndOverflow();
     put100Int();
     region.close();
@@ -119,7 +102,7 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
  @Test
-  public void testPersistOnlyAndThenPeristAndOverflow(){
+  public void testPersistOnlyAndThenPeristAndOverflow() {
    createPersistOnly();
    put100Int();
    region.close();
@@ -128,15 +111,11 @@ public class DiskRegionChangingRegionAttributesJUnitTest extends
   }
   
   @Test
-  public void testPersistAndOverflowAndThenPersistOnly(){
+  public void testPersistAndOverflowAndThenPersistOnly() {
     createPersistAndOverflow();
     put100Int();
     region.close();
     createPersistOnly();
     assertTrue(region.size()==100);
   }
-  
-  
-  
 }
-