Posted to commits@geode.apache.org by kl...@apache.org on 2016/06/02 23:16:55 UTC

[3/3] incubator-geode git commit: Test cleanup.

Test cleanup.

* update from junit.framework.Assert to org.junit.Assert
* fix exception eating
* remove empty or useless javadocs, dead code, etc
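
The two code-level patterns behind these bullets, sketched on a hypothetical JUnit 4 test (the class and helper names below are illustrative, not code from this commit): assertions move from the deprecated junit.framework.Assert to static imports of org.junit.Assert, and catch blocks that used to swallow an exception and call fail(...) now rethrow it as an AssertionError with the original exception as the cause (the AssertionError(String, Throwable) constructor exists since Java 7), so the failure report keeps the full stack trace.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ExceptionEatingExampleTest {

  @Test
  public void lookupKeepsOriginalFailureCause() {
    try {
      assertEquals("value1", lookup("key1"));
    } catch (Exception e) {
      // Old style: fail("failed due to " + e) reported only e.toString() and
      // dropped the stack trace ("exception eating").
      // New style: rethrow with e as the cause so JUnit prints the full trace.
      throw new AssertionError("lookup failed", e);
    }
  }

  // Illustrative stand-in for the region/cache operations the real tests exercise.
  private String lookup(String key) throws Exception {
    return "value1";
  }
}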


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/497208f2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/497208f2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/497208f2

Branch: refs/heads/feature/GEODE-837
Commit: 497208f2ef5ae27c396e6dd74c62e61c28af7d0d
Parents: 24aa643
Author: Kirk Lund <kl...@apache.org>
Authored: Thu Jun 2 16:14:48 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Thu Jun 2 16:14:48 2016 -0700

----------------------------------------------------------------------
 .../ConcurrentRegionOperationsJUnitTest.java    |  32 +-
 .../internal/cache/DiskRegionJUnitTest.java     |  70 +-
 .../internal/cache/MapInterfaceJUnitTest.java   |  41 +-
 .../gemfire/internal/cache/OffHeapTestUtil.java |   8 +-
 ...rtitionedRegionCacheLoaderForRootRegion.java |  18 +-
 ...artitionedRegionCacheLoaderForSubRegion.java |  17 +-
 ...egionOverflowAsyncRollingOpLogJUnitTest.java |  54 +-
 .../cache/execute/PerformanceTestFunction.java  |   6 +-
 .../cache/ha/Bug36853EventsExpiryDUnitTest.java |  56 +-
 .../cache/ha/HARQAddOperationJUnitTest.java     | 160 +----
 .../internal/cache/ha/HARegionJUnitTest.java    | 144 ++--
 .../cache/ha/HARegionQueueJUnitTest.java        | 653 ++++++-------------
 .../cache/partitioned/Bug39356DUnitTest.java    |  27 +-
 .../sockets/InterestListEndpointDUnitTest.java  |  10 +-
 .../LogWriterDisabledPerformanceTest.java       |  30 +-
 .../logging/LogWriterPerformanceTest.java       |  31 +-
 .../logging/LoggingPerformanceTestCase.java     |  37 +-
 .../internal/logging/TestLogWriterFactory.java  |   3 +-
 .../log4j/Log4J2DisabledPerformanceTest.java    |  29 +-
 .../logging/log4j/Log4J2PerformanceTest.java    |  35 +-
 .../LogWriterLoggerDisabledPerformanceTest.java |  25 +-
 .../log4j/LogWriterLoggerPerformanceTest.java   |  39 +-
 .../gemfire/pdx/JSONFormatterJUnitTest.java     |  34 +-
 .../pdx/JSONPdxClientServerDUnitTest.java       |  71 +-
 .../cq/dunit/CqQueryUsingPoolDUnitTest.java     |  23 +-
 .../tier/sockets/DurableClientTestCase.java     |   5 +-
 .../tools/pulse/testbed/driver/PulseUITest.java |  62 +-
 .../tools/pulse/tests/PulseAbstractTest.java    | 177 +++--
 .../tools/pulse/tests/PulseAutomatedTest.java   |  79 +--
 .../tools/pulse/tests/PulseBaseTest.java        |  77 +--
 30 files changed, 693 insertions(+), 1360 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
index 1e63ea1..032301f 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
@@ -16,9 +16,7 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.Collection;
 import java.util.Iterator;
@@ -31,8 +29,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -60,12 +56,9 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * recover the old values and again the two regions are checked for equality.
  *  * This test is run for all modes persist, persist+overflow, overflow only in
  * syn and async mode.
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
-{
+public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase {
 
   private int numberOfPutsThreads = 5;
 
@@ -107,8 +100,8 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
 
   private static int counter = 0;
 
-  @Before
-  protected final void postsetUp() throws Exception {
+  @Override
+  protected final void postSetUp() throws Exception {
     counter++;
     if (longTest) {
       TIME_TO_RUN = 10000;
@@ -322,11 +315,10 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
         p);
     validate(region1, region2);
   }
+
   /**
    * Tests the bug where a get operation on an evicted entry fails to get value
    * as the oplog is deleted by the roller, but the entry was not rolled.
-   * 
-   *  
    */
   @Test
   public void testBug35048()
@@ -437,7 +429,6 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
     ThreadUtils.join(t1, 30 * 1000);
     ThreadUtils.join(t2, 30 * 1000);
     assertTrue(!failure);
-
   }
 
   private final AtomicBoolean timeToStop = new AtomicBoolean();
@@ -451,9 +442,9 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
     try {
       startLine.await();
     } catch (InterruptedException ie) {
-      fail("unexpected " + ie);
+      throw new AssertionError("unexpected ", ie);
     } catch (BrokenBarrierException ex) {
-      fail("unexpected " + ex);
+      throw new AssertionError("unexpected ", ex);
     }
   }
   
@@ -683,7 +674,7 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
       catch (Exception e) {
         exceptionOccuredInGets = true;
         logWriter.severe("Exception occured in get ", e);
-        fail(" failed during get due to " + e);
+        throw new AssertionError(" failed during get due to ", e);
       }
     } finally {
       if (lock != null) {
@@ -756,7 +747,7 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
     catch (Exception e) {
       exceptionOccuredInClears = true;
       logWriter.severe("Exception occured in clear=",e);
-      fail("Exception occured in clear");
+      throw new AssertionError("Exception occured in clear", e);
     }
   }
 
@@ -768,8 +759,6 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
    * because a clear operation stop/starts the roller) & the destroy operation
    * actually joined with the new thread ( different from the one on which
    * notification was issued to exit).
-   * 
-   *  
    */
   @Test
   public void testConcurrentClearAndRegionDestroyBug()
@@ -904,8 +893,7 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
       catch (Exception e) {
         exceptionOccuredInForceRolls = true;
         logWriter.severe("Exception occured in forceRolling ", e);
-        fail(" Exception occured here");
-
+        throw new AssertionError(" Exception occured here", e);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
index 009e4df..ddc1d6e 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
@@ -27,13 +27,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import junit.framework.Assert;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
 
-import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EntryEvent;
@@ -53,7 +51,7 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 /**
  * TODO: fails when running integrationTest from gradle command-line on Windows 7
  * 
- * JUnit tests covering some miscellaneous functionalites of Disk Region.
+ * JUnit tests covering some miscellaneous functionality of Disk Region.
  */
 @Category(IntegrationTest.class)
 public class DiskRegionJUnitTest extends DiskRegionTestingBase {
@@ -97,6 +95,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
   private int counter = 0;
   private boolean hasBeenNotified = false;
 
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
   @Override
   protected final void postSetUp() throws Exception {
     this.exceptionOccured = false;
@@ -138,7 +139,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     assertNotNull(cacheListener.lastEvent);
     assertEquals(null, cacheListener.lastEvent.getOldValue());
 
-    Assert.assertTrue(region.get("1")==null);
+    assertTrue(region.get("1")==null);
     
     boolean exceptionOccured = false;
     try {
@@ -158,7 +159,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     region.close();
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,props);
     
-    Assert.assertTrue(region.get("1")==null);
+    assertTrue(region.get("1")==null);
     region.destroyRegion();
   }
 
@@ -356,8 +357,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
           catch (InterruptedException e) {
             testFailed = true;
             failureCause = "interrupted exception not expected here";
-            fail("exception not expected here");
-
+            throw new AssertionError("exception not expected here", e);
           }
         }
         region.get(new Integer(0));
@@ -738,9 +738,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
     diskRegionProperties.setRegionName("IGNORE_EXCEPTION_testSingleDirectorySizeViolation");
     //setting to null will make only one directory
-    File dir = new File("testSingleDirectoryNotHanging");
-    dir.mkdir();
-    dir.deleteOnExit();
+    File dir = temporaryFolder.newFolder("testSingleDirectoryNotHanging");
     File[] dirs = new File[] {dir};
     int[] dirSizes = { 2048 };
     diskRegionProperties.setDiskDirsAndSizes(dirs, dirSizes);
@@ -857,7 +855,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (DiskAccessException e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+      throw new AssertionError("FAILED::", e);
     }
 
     // we should have put 2 values in each dir so the next one should not fit
@@ -868,7 +866,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (DiskAccessException e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+      throw new AssertionError("FAILED::", e);
     }
 
     assertEquals(false, cache.isClosed());
@@ -913,7 +911,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (DiskAccessException e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+      throw new AssertionError("FAILED::", e);
     }
 
     // we should have put 2 values in each dir so the next one should not fit
@@ -974,7 +972,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (DiskAccessException e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+      throw new AssertionError("FAILED::", e);
     }
 
     // we should have put 2 values in each dir so the next one should not fit
@@ -985,7 +983,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (DiskAccessException e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("FAILED::" + e.toString());
+      throw new AssertionError("FAILED::", e);
     }
 
     assertEquals(false, cache.isClosed());
@@ -1081,7 +1079,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
           logWriter.error("Exception occured but not expected", e);
           testFailed = true;
           failureCause = "FAILED::" + e.toString();
-          fail("FAILED::" + e.toString());
+          throw new AssertionError("FAILED::", e);
         }
 
         final Thread t1 = Thread.currentThread();
@@ -1299,7 +1297,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (Exception e) {
       logWriter.error("Exception occured but not expected", e);
-      fail("Failed while getting invalid entry:" + e.toString());
+      throw new AssertionError("Failed while getting invalid entry:", e);
 
     }
     assertTrue("get operation on invalid entry returned non null value",
@@ -1339,8 +1337,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("Failed to get the value on disk");
-
+      throw new AssertionError("Failed to get the value on disk", ex);
     }
     //verify that the value retrieved above represents byte array.
     //verify the length of the byte[]
@@ -1554,20 +1551,9 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     diskRegionProperties.setSynchronous(true);   
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache, diskRegionProperties);
     byte[] bytes = new byte[256];
-    try {
     for(int i=0; i<1500; i++){
       region.put(new Integer(i%10),bytes);
     }  
-    }
-    catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-    }
-    catch(Throwable th) {
-      th.printStackTrace();
-      logWriter.error(th);
-      fail("Test failed due to exception (see logs for details):" + th);
-    }    
   }
   
   /**
@@ -1671,9 +1657,6 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
       assertEquals(1, DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
           props, Scope.LOCAL).size());
     }
-    catch (Exception e) {
-      logWriter.error("Test failed", e);
-    }
     finally {
       CacheObserverHolder.setInstance(old);
       LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
@@ -1729,9 +1712,6 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
       assertEquals(1,DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
           props, Scope.LOCAL).size());
     }
-    catch (Exception e) {
-      fail("Exception not expected but did occur due to " + e);
-    }
     finally {
       CacheObserverHolder.setInstance(old);
       LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
@@ -2051,14 +2031,10 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
     region.create("key2", "value2");
     region.create("key3", "value3");
     ((LocalRegion)region).getDiskRegion().testHookCloseAllOverflowChannels();
-    try {
-      //Update key1, so that key2 goes on disk & encounters an exception
-      region.destroy("key1");
-      region.destroy("key3");
-    }
-    catch (DiskAccessException dae) {
-      fail("Should not have encountered DiskAccessException");
-    }
+
+    //Update key1, so that key2 goes on disk & encounters an exception
+    region.destroy("key1");
+    region.destroy("key3");
   }
   
   /**
@@ -2454,7 +2430,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase {
         }
       }
       assertTrue(i > 1);
-      Assert.assertTrue(switchedOplog[0].getOplogFile().delete());
+      assertTrue(switchedOplog[0].getOplogFile().delete());
       region.close();
       //We don't validate the oplogs until we recreate the disk store.
       DiskStoreImpl store = ((LocalRegion) region).getDiskStore();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterfaceJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterfaceJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterfaceJUnitTest.java
index e3a3324..b648efd 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterfaceJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterfaceJUnitTest.java
@@ -16,18 +16,15 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import java.util.*;
+import static org.junit.Assert.*;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
@@ -61,14 +58,14 @@ public class MapInterfaceJUnitTest {
           .create());
     }
     catch (Exception e) {
-      fail(" failed due to " + e);
+      throw new AssertionError(" failed due to ", e);
     }
     for (int i = 0; i < 100; i++) {
       region.put(new Integer(i), new Integer(i));
     }
-    Assert.assertEquals(new Integer(50), region.get(new Integer(50)));
+    assertEquals(new Integer(50), region.get(new Integer(50)));
     region.localClear();
-    Assert.assertEquals(null, region.get(new Integer(50)));
+    assertEquals(null, region.get(new Integer(50)));
     region.close();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
@@ -77,7 +74,7 @@ public class MapInterfaceJUnitTest {
           .create());
     }
     catch (Exception e) {
-      fail(" failed in creating region due to " + e);
+      throw new AssertionError(" failed in creating region due to ", e);
     }
     boolean exceptionOccured = false;
     try {
@@ -114,7 +111,7 @@ public class MapInterfaceJUnitTest {
           .create());
     }
     catch (Exception e) {
-      fail(" failed due to " + e);
+      throw new AssertionError(" failed due to ", e);
     }
     HashMap m = new HashMap();
     m.put("aKey", "aValue");
@@ -125,9 +122,9 @@ public class MapInterfaceJUnitTest {
     for (int i = 0; i < 100; i++) {
       region.put(new Integer(i), new Integer(i));
     }
-    Assert.assertEquals(new Integer(50), region.get(new Integer(50)));
+    assertEquals(new Integer(50), region.get(new Integer(50)));
     region.localClear();
-    Assert.assertEquals(null, region.get(new Integer(50)));
+    assertEquals(null, region.get(new Integer(50)));
     region.close();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
@@ -136,7 +133,7 @@ public class MapInterfaceJUnitTest {
           .create());
     }
     catch (Exception e) {
-      fail(" failed in creating region due to " + e);
+      throw new AssertionError(" failed in creating region due to ", e);
     }
     boolean exceptionOccured = false;
     try {
@@ -192,14 +189,14 @@ public class MapInterfaceJUnitTest {
       }
     }
     catch (Exception e) {
-      fail(" failed due to " + e);
+      throw new AssertionError(" failed due to ", e);
     }
     for (int i = 0; i < 100; i++) {
       region.put(new Integer(i), new Integer(i));
     }
-    Assert.assertEquals(new Integer(50), region.get(new Integer(50)));
+    assertEquals(new Integer(50), region.get(new Integer(50)));
     region.localClear();
-    Assert.assertEquals(null, region.get(new Integer(50)));
+    assertEquals(null, region.get(new Integer(50)));
     region.close();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
@@ -208,7 +205,7 @@ public class MapInterfaceJUnitTest {
           .create());
     }
     catch (Exception e) {
-      fail(" failed in creating region due to " + e);
+      throw new AssertionError(" failed in creating region due to ", e);
     }
     boolean exceptionOccured = false;
     try {
@@ -267,10 +264,10 @@ public class MapInterfaceJUnitTest {
         fail(" beforeCreate call back did not come");
       }
       
-      Assert.assertEquals(counter,1);
+      assertEquals(counter,1);
     }
     catch (Exception e) {
-      fail(" failed due to " + e);
+      throw new AssertionError(" failed due to ", e);
     }
    
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
index f9d2c2a..2d74ab9 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
@@ -16,11 +16,11 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.util.Collections;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.internal.offheap.MemoryBlock;
 import com.gemstone.gemfire.internal.offheap.RefCountChangeInfo;
@@ -30,7 +30,7 @@ import com.gemstone.gemfire.internal.offheap.MemoryAllocatorImpl;
 @SuppressWarnings("deprecation")
 public class OffHeapTestUtil {
 
-  public static void checkOrphans() { // TODO:KIRK: need to do something special to guarantee proper tearDown
+  public static void checkOrphans() {
     MemoryAllocatorImpl allocator = null;
     try {
       allocator = MemoryAllocatorImpl.getAllocator();
@@ -57,7 +57,7 @@ public class OffHeapTestUtil {
       System.out.println("Sample orphan: " + orphans.get(0));
       System.out.println("Orphan info: " + info);
     }
-    Assert.assertEquals(Collections.emptyList(), orphans);
+    assertEquals(Collections.emptyList(), orphans);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForRootRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForRootRegion.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForRootRegion.java
index 3e9ac02..b9aa48a 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForRootRegion.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForRootRegion.java
@@ -16,9 +16,9 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import java.util.Properties;
+import static org.junit.Assert.*;
 
-import junit.framework.Assert;
+import java.util.Properties;
 
 import com.gemstone.gemfire.cache.CacheLoader;
 import com.gemstone.gemfire.cache.CacheLoaderException;
@@ -28,18 +28,17 @@ import com.gemstone.gemfire.cache.Declarable;
 /**
  * This class is CacheLoader for partition region
  */
-public class PartitionedRegionCacheLoaderForRootRegion implements CacheLoader,
-		Declarable {
+public class PartitionedRegionCacheLoaderForRootRegion implements CacheLoader, Declarable {
 
+  @Override
 	public Object load(LoaderHelper helper) throws CacheLoaderException {
 
 		/* checking the attributes set in xml file. */
 		PartitionedRegion pr = (PartitionedRegion) helper.getRegion();
 		if (pr.getAttributes().getPartitionAttributes().getRedundantCopies() != 1)
-			Assert
-					.fail("Redundancy of the partition region is not 1");
+			fail("Redundancy of the partition region is not 1");
 
-		Assert.assertEquals(
+		assertEquals(
                     pr.getAttributes().getPartitionAttributes().getLocalMaxMemory(), 200);
 
 		/*
@@ -47,15 +46,14 @@ public class PartitionedRegionCacheLoaderForRootRegion implements CacheLoader,
 		 * not
 		 */
 		return helper.getKey();
-
 	}
 
+  @Override
 	public void close() {
-
 	}
 
+  @Override
 	public void init(Properties props) {
-
 	}
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForSubRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForSubRegion.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForSubRegion.java
index e0ba2c2..f824ab5 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForSubRegion.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheLoaderForSubRegion.java
@@ -16,9 +16,9 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import java.util.Properties;
+import static org.junit.Assert.*;
 
-import junit.framework.Assert;
+import java.util.Properties;
 
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.CacheLoader;
@@ -31,34 +31,33 @@ import com.gemstone.gemfire.cache.Declarable;
  */
 public class PartitionedRegionCacheLoaderForSubRegion implements CacheLoader, Declarable {
 
+  @Override
 	public Object load(LoaderHelper helper) throws CacheLoaderException {
 
 		/* checking the attributes set in xml file */
 		PartitionedRegion pr = (PartitionedRegion) helper.getRegion();
 		if (pr.getAttributes().getPartitionAttributes().getRedundantCopies() != 1)
-			Assert
-					.fail("Redundancy of the partition region is not 1");
+			fail("Redundancy of the partition region is not 1");
 		
-		Assert.assertEquals(pr.getAttributes()
+		assertEquals(pr.getAttributes()
 				.getPartitionAttributes().getGlobalProperties().getProperty(
 						PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_PROPERTY),
 				"11");
-		Assert.assertEquals(pr.getAttributes()
+		assertEquals(pr.getAttributes()
 				.getPartitionAttributes().getLocalMaxMemory(), 200);
 		/*
 		 * Returning the same key. This is to check CaccheLoader is invoked or
 		 * not
 		 */
 		return helper.getKey();
-
 	}
 
+  @Override
 	public void close() {
-
 	}
 
+  @Override
 	public void init(Properties props) {
-
 	}
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
index 296ae00..2031fec 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
@@ -16,19 +16,20 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.LogWriter;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.internal.cache.CacheObserverAdapter;
+import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
+import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
+import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
+import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
-import com.gemstone.gemfire.internal.cache.*;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
@@ -37,23 +38,19 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * 1) Performance of Get Operation for Entry faulting in from current Op Log 2)
  * Performance of Get operation for Entry faulting in from previous Op Log 3)
  * Performance of Get operation for Entry faulting in from H Tree
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
-    DiskRegionTestingBase
-{
+public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends DiskRegionTestingBase {
 
+  private static int counter = 0;
 
-  
-  LogWriter log = null;
+  private static int ENTRY_SIZE = 1024;
 
-  static int counter = 0;
+  private volatile boolean afterHavingCompacted = false;
 
-  private static int ENTRY_SIZE = 1024;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
   @Override
   protected final void postSetUp() throws Exception {
@@ -69,7 +66,7 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
         diskProps);
   }
 
-  @After
+  @Override
   protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
@@ -87,7 +84,7 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
     
   }
 
-  public void populateFirst0k_10Kbwrites()
+  private void populateFirst0k_10Kbwrites()
   {
     
    final byte[] value = new byte[ENTRY_SIZE];
@@ -121,9 +118,8 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
             + statsGet);
 
   }
-  
-  protected volatile boolean afterHavingCompacted = false;
-  public void populateSecond10kto20kwrites()
+
+  private void populateSecond10kto20kwrites()
   {
     afterHavingCompacted = false;
     DiskRegionTestingBase.setCacheObserverCallBack();
@@ -201,11 +197,12 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
             + statsGet2);
     DiskRegionTestingBase.unSetCacheObserverCallBack();
   }
-/**
- * getLRUStats
- * @param region1
- * @return
- */
+
+  /**
+   * getLRUStats
+   * @param region1
+   * @return
+   */
   protected LRUStatistics getLRUStats(Region region1)
   {
     return ((LocalRegion)region1).getEvictionController().getLRUHelper()
@@ -214,4 +211,3 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
   }
 
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PerformanceTestFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PerformanceTestFunction.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PerformanceTestFunction.java
index 180ec52..7e1a0fd 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PerformanceTestFunction.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PerformanceTestFunction.java
@@ -16,12 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.execute;
 
+import static org.junit.Assert.*;
+
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.execute.FunctionAdapter;
 import com.gemstone.gemfire.cache.execute.FunctionContext;
@@ -46,7 +46,7 @@ public class PerformanceTestFunction extends FunctionAdapter {
       Region fcd = PartitionRegionHelper.getLocalDataForContext(prContext);
       for (Iterator i = allKeysSet.iterator(); i.hasNext();) {
         Object val = fcd.get(i.next());
-        Assert.assertNotNull(val);
+        assertNotNull(val);
         vals.add(val);
       }
       context.getResultSender().lastResult(vals);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
index 4611fb6..d30420a 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
@@ -16,18 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
 import static org.junit.Assert.*;
 
-import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
-import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-
 import java.util.Properties;
 
-import junit.framework.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
@@ -39,8 +33,8 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
-import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
@@ -49,6 +43,8 @@ import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
  * This is a bug test for 36853 (Expiry logic in HA is used to expire early data
@@ -62,30 +58,27 @@ import com.gemstone.gemfire.test.dunit.VM;
  * is set for delayed start. This will make some of the events in the queue
  * expire before dispatcher can start picking them up for delivery to the
  * client.
- * 
- * 
  */
 @Category(DistributedTest.class)
-public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
-{
+public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase {
 
   /** Cache-server */
-  VM server = null;
+  private VM server = null;
 
   /** Client , connected to Cache-server */
-  VM client = null;
+  private VM client = null;
 
   /** Name of the test region */
   private static final String REGION_NAME = "Bug36853EventsExpiryDUnitTest_region";
 
   /** The cache instance for test cases */
-  protected static Cache cache = null;
+  private static Cache cache = null;
 
   /** Boolean to indicate the client to proceed for validation */
-  protected static volatile boolean proceedForValidation = false;
+  private static volatile boolean proceedForValidation = false;
 
   /** Counter to indicate number of puts recieved by client */
-  protected static volatile int putsRecievedByClient;
+  private static volatile int putsRecievedByClient;
 
   /** The last key for operations, to notify for proceeding to validation */
   private static final String LAST_KEY = "LAST_KEY";
@@ -96,21 +89,6 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
   /** Number of puts done for the test */
   private static final int TOTAL_PUTS = 5;
 
-  /**
-   * Constructor
-   * 
-   * @param name
-   */
-  public Bug36853EventsExpiryDUnitTest() {
-    super();
-  }
-
-  /**
-   * Sets up the cache-server and client for the test
-   * 
-   * @throws Exception -
-   *           thrown in any problem occurs in setUp
-   */
   @Override
   public final void preSetUp() throws Exception {
     disconnectAllFromDS();
@@ -145,7 +123,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
   /**
    * Creates cache and starts the bridge-server
    */
-  public static Integer createServerCache() throws Exception
+  private static Integer createServerCache() throws Exception
   {
     System.setProperty(HARegionQueue.REGION_ENTRY_EXPIRY_TIME, "1");
     System.setProperty("slowStartTimeForTesting", String
@@ -175,7 +153,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
    * @throws Exception -
    *           thrown if any problem occurs in setting up the client
    */
-  public static void createClientCache(String hostName, Integer port)
+  private static void createClientCache(String hostName, Integer port)
     throws Exception {
     Properties props = new Properties();
     props.setProperty("mcast-port", "0");
@@ -222,7 +200,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
    * @throws Exception -
    *           thrown if any problem occurs in put operation
    */
-  public static void generateEvents() throws Exception
+  private static void generateEvents() throws Exception
   {
     String regionName = Region.SEPARATOR + REGION_NAME;
     Region region = cache.getRegion(regionName);
@@ -262,7 +240,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
    * Waits for the listener to receive all events and validates that no
    * exception occured in client
    */
-  public static void validateEventCountAtClient() throws Exception
+  private static void validateEventCountAtClient() throws Exception
   {
     if (!proceedForValidation) {
       synchronized (Bug36853EventsExpiryDUnitTest.class) {
@@ -278,7 +256,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
       }
     }
     LogWriterUtils.getLogWriter().info("Starting validation on client2");
-    Assert.assertEquals(
+    assertEquals(
         "Puts recieved by client not equal to the puts done at server.",
         TOTAL_PUTS, putsRecievedByClient);
     LogWriterUtils.getLogWriter()
@@ -291,7 +269,7 @@ public class Bug36853EventsExpiryDUnitTest extends JUnit4CacheTestCase
    * Closes the cache
    * 
    */
-  public static void unSetExpiryTimeAndCloseCache()
+  private static void unSetExpiryTimeAndCloseCache()
   {    
     System.clearProperty(HARegionQueue.REGION_ENTRY_EXPIRY_TIME);
     CacheTestCase.closeCache();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
index 1c7b0d0..c50c28b 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
@@ -16,11 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
+import static org.junit.Assert.*;
+
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 
 import org.apache.logging.log4j.Logger;
 import org.junit.After;
@@ -28,11 +29,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
@@ -43,7 +39,6 @@ import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
-import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.logging.LogService;
@@ -52,12 +47,9 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Test to verify Add operation to HARegion Queue with and without conflation.
- * 
  */
-
 @Category(IntegrationTest.class)
-public class HARQAddOperationJUnitTest
-{
+public class HARQAddOperationJUnitTest {
   private static final Logger logger = LogService.getLogger();
 
   /** The cache instance */
@@ -85,78 +77,40 @@ public class HARQAddOperationJUnitTest
   
   volatile static int expiryCount = 0;
   
-  /**
-   * Create the cache in setup
-   * 
-   * @throws Exception -
-   *           thrown if any exception occurs in setUp
-   */
   @Before
-  public void setUp() throws Exception
-  {
+  public void setUp() throws Exception {
     this.cache = createCache();
     this.logWriter = cache.getLogger();
   }
 
-  /**
-   * Close the cache in tear down *
-   * 
-   * @throws Exception -
-   *           thrown if any exception occurs in tearDown
-   */
   @After
-  public void tearDown() throws Exception
-  {
+  public void tearDown() throws Exception {
     this.cache.close();
   }
 
   /**
    * Creates the cache instance for the test
-   * 
-   * @return the cache instance
-   * @throws CacheException -
-   *           thrown if any exception occurs in cache creation
    */
-  private Cache createCache() throws CacheException
-  {
+  private Cache createCache() throws CacheException {
     return new CacheFactory().set("mcast-port", "0").create();
   }
 
   /**
    * Creates HA region-queue object
-   * 
-   * @return HA region-queue object
-   * @throws IOException
-   * @throws ClassNotFoundException
-   * @throws CacheException
-   * @throws InterruptedException
    */
-  protected HARegionQueue createHARegionQueue(String name)
-      throws IOException, ClassNotFoundException, CacheException, InterruptedException
-  {
+  protected HARegionQueue createHARegionQueue(String name) throws IOException, ClassNotFoundException, CacheException, InterruptedException {
     AttributesFactory factory = new AttributesFactory();
     factory.setDataPolicy(DataPolicy.REPLICATE);
     factory.setScope(Scope.DISTRIBUTED_ACK);
-    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name,
-        cache, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
+    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name, cache, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
     return regionqueue;
   }
 
   /**
    * Creates HA region-queue object
-   * 
-   * @return HA region-queue object
-   * @throws IOException
-   * @throws ClassNotFoundException
-   * @throws CacheException
-   * @throws InterruptedException
    */
-  protected HARegionQueue createHARegionQueue(String name,
-      HARegionQueueAttributes attrs) throws IOException, ClassNotFoundException, CacheException, InterruptedException
-  {
-
-    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name,
-        cache, attrs, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
+  protected HARegionQueue createHARegionQueue(String name, HARegionQueueAttributes attrs) throws IOException, ClassNotFoundException, CacheException, InterruptedException {
+    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name, cache, attrs, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
     return regionqueue;
   }
 
@@ -166,11 +120,9 @@ public class HARQAddOperationJUnitTest
    * to value 2 3) Available IDs , Last DispatchedWrapper Set & Conflation Map
    * should have size 1. 4) Conflation Map , LastDispatchedWrapper Set &
    * Available IDs should have counter corresponding to second operation
-   * 
    */
   @Test
-  public void testQueueAddOperationWithConflation() throws Exception
-  {
+  public void testQueueAddOperationWithConflation() throws Exception {
     this.logWriter
         .info("HARegionQueueJUnitTest : testQueueAddOperationWithConflation BEGIN");
     this.rq = createHARegionQueue("testQueueAddOperationWithConflation");
@@ -202,11 +154,9 @@ public class HARQAddOperationJUnitTest
    * 3) This wrapper should have a set with size 1. 4) The available IDs set
    * shoudl have size 1. 5) Put another object by same thread. 6) The wrapper
    * set & availableIs List should have size 2 .
-   * 
    */
   @Test
-  public void testQueueAddOperationWithoutConflation() throws Exception
-  {
+  public void testQueueAddOperationWithoutConflation() throws Exception {
     this.logWriter
         .info("HARegionQueueJUnitTest : testQueueAddOperationWithoutConflation BEGIN");
     this.rq = createHARegionQueue("testQueueAddOperationWithConflation");
@@ -250,12 +200,9 @@ public class HARQAddOperationJUnitTest
    * available IDs , LastDispatchedWrapper's Set should have size 0. Events map
    * containg should have size 1 ( corresponding to the
    * lastDispatchedAndCurrentEvent Wrapper objcet)
-   * 
-   * @throws Exception
    */
   @Test
-  public void testQueueAddTakeOperationWithoutConflation() throws Exception
-  {
+  public void testQueueAddTakeOperationWithoutConflation() throws Exception {
     this.logWriter
         .info("HARegionQueueJUnitTest : testQueueAddTakeOperationWithoutConflation BEGIN");
 
@@ -280,12 +227,9 @@ public class HARQAddOperationJUnitTest
    * with Threaddentifer as key & sequence as the value for Expiry. Perform a
    * take operation. Validate that expiry on ThreadIdentifier removes itself
    * from Events Map
-   * 
-   * 
    */
   @Test
-  public void testExpiryOnThreadIdentifier()
-  {
+  public void testExpiryOnThreadIdentifier() {
     try {
       HARegionQueueAttributes attrs = new HARegionQueueAttributes();
       attrs.setExpiryTime(2);
@@ -323,12 +267,9 @@ public class HARQAddOperationJUnitTest
    * expiry. Validate the data present in Queue experiences expiry. After the
    * expiry of the data , AvaialbleIds size should be 0, entry removed from
    * Region, LastDispatchedWrapperSet should have size 0.
-   * 
-   * 
    */
   @Test
-  public void testNoExpiryOnThreadIdentifier()
-  {
+  public void testNoExpiryOnThreadIdentifier() {
     try {
       HARegionQueueAttributes hqa = new HARegionQueueAttributes();
       hqa.setExpiryTime(8);
@@ -381,11 +322,9 @@ public class HARQAddOperationJUnitTest
    * queue contains objects from 1- 10. QRM with sequenceID 5 arrives It should
    * remove only remove objects for 1- 5. Then sequenceID 10 come which should
    * remove 5-10.
-   * 
    */
   @Test
-  public void testMultipleQRMArrival() throws Exception
-  {
+  public void testMultipleQRMArrival() throws Exception {
     HARegionQueue regionqueue = createHARegionQueue("testNoExpiryOnThreadIdentifier");
 
     EventID[] ids = new EventID[10];
@@ -415,7 +354,6 @@ public class HARQAddOperationJUnitTest
 
     regionqueue.removeDispatchedEvents(ids[9]);
     assertEquals(0, regionqueue.getAvalaibleIds().size());
-
   }
 
   /**
@@ -425,11 +363,9 @@ public class HARQAddOperationJUnitTest
    * before QRM thread acts , the object should be present in the
    * lastDispatchedSet & AvailableID. Then the QRM thread gets unblocked , it
    * should remove from the available ID.
-   * 
    */
   @Test
-  public void testConcurrentPutAndQRM() throws Exception
-  {
+  public void testConcurrentPutAndQRM() throws Exception {
     testFailed = false;
     message = new StringBuffer();
     final HARegionQueue regionqueue = createHARegionQueue("testConcurrentPutAndQRM");
@@ -486,8 +422,7 @@ public class HARQAddOperationJUnitTest
    * put operation shud remove from region without adding the ID anywhere.
    */
   @Test
-  public void testConcurrentQRMAndPut() throws Exception
-  {
+  public void testConcurrentQRMAndPut() throws Exception {
     testFailed = false;
     final HARegionQueue regionqueue = createHARegionQueue("testConcurrentQRMAndPut");
     final EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
@@ -534,20 +469,15 @@ public class HARQAddOperationJUnitTest
 
     assertEquals(0, regionqueue.getAvalaibleIds().size());
     assertEquals(2, regionqueue.getLastDispatchedSequenceId(id2));
-
   }
 
   /**
    * Two QRMs arriving such that higer sequence number arriving before lower
    * sequence number. The lower squnce number should not set itself & also not
    * do any checking on the IDs of the LinkedHashSet
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testEventMapPopulationForQRM() throws Exception
-  {
+  public void testEventMapPopulationForQRM() throws Exception {
     HARegionQueue regionqueue = createHARegionQueue("testEventMapPopulationForQRM");
     EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
     EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
@@ -577,13 +507,9 @@ public class HARQAddOperationJUnitTest
    * for that ThreadIdentifier. The ID which gets conflated should not be
    * present in the availableID, Region & that ThreadIdentifier's HashSet . The
    * conflation map should contain the Old IDs position.
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testCleanUpForConflation() throws Exception
-  {
+  public void testCleanUpForConflation() throws Exception {
     this.logWriter
         .info("HARQAddOperationJUnitTest : testCleanUpForConflation BEGIN");
     testFailed = false;
@@ -672,10 +598,8 @@ public class HARQAddOperationJUnitTest
    * are deleted from the available IDs & the Counters set contained in DACE.
    * Conflation is disabled.
    */
-
   @Test
-  public void testPeekAndRemoveWithoutConflation() throws Exception
-  {
+  public void testPeekAndRemoveWithoutConflation() throws Exception {
     testFailed = false;
     message = null;
     final int numOfThreads = 5;
@@ -730,7 +654,6 @@ public class HARQAddOperationJUnitTest
 
     this.logWriter
         .info("testPeekAndRemoveWithoutConflation() completed successfully");
-
   }
 
   /**
@@ -739,10 +662,8 @@ public class HARQAddOperationJUnitTest
    * are deleted from the available IDs & the Counters set contained in DACE.
    * Conflation is enabled
    */
-
   @Test
-  public void testPeekAndRemoveWithConflation() throws Exception
-  {
+  public void testPeekAndRemoveWithConflation() throws Exception {
     testFailed = false;
     message = null;
     final int numOfThreads = 5;
@@ -802,7 +723,6 @@ public class HARQAddOperationJUnitTest
 
     this.logWriter
         .info("testPeekAndRemoveWithConflation() completed successfully");
-
   }
 
   /**
@@ -810,13 +730,9 @@ public class HARQAddOperationJUnitTest
    * do a peek of batch size 5, 10 , 15 & 20 respectively. And all of them
    * concurrently cal remove. The remove should ensure that the entries are
    * deleted from the available IDs & the Counters set contained in DACE.
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testPeekForDiffBatchSizeAndRemoveAll() throws Exception
-  {
+  public void testPeekForDiffBatchSizeAndRemoveAll() throws Exception {
     testFailed = false;
     message = null;
     barrierCount = 0;
@@ -924,12 +840,9 @@ public class HARQAddOperationJUnitTest
    * do a peek of batch size 5, 10 and 15 respectively. And all of them
    * concurrently call remove. The remove should ensure that the entries are
    * deleted from the available IDs & the Counters set contained in DACE.
-   * 
-   * @throws Exception
    */
   @Test
-  public void testPeekForDiffBatchSizeAndRemoveSome() throws Exception
-  {
+  public void testPeekForDiffBatchSizeAndRemoveSome() throws Exception {
     testFailed = false;
     barrierCount = 0;
     message = null;
@@ -1021,7 +934,6 @@ public class HARQAddOperationJUnitTest
 
     this.logWriter
         .info("testPeekForDiffBatchSizeAndRemoveSome() completed successfully");
-
   }
 
   /**
@@ -1034,9 +946,8 @@ public class HARQAddOperationJUnitTest
    * next expiry should remove the LastDisptachedWrapper
    */
   @Test
-  public void testAddWithQRMAndExpiry() throws Exception
-  {
-	try{  
+  public void testAddWithQRMAndExpiry() throws Exception {
+	  try {
       HARegionQueueAttributes attrs = new HARegionQueueAttributes();
       attrs.setExpiryTime(10);
       final HARegionQueue regionqueue = new HARegionQueue.TestOnlyHARegionQueue("testing", cache, attrs) {
@@ -1078,11 +989,11 @@ public class HARQAddOperationJUnitTest
       assertEquals(4, regionqueue.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)));
       // verify 1-5 not in region
       for (long i = 1; i < 6; i++) {
-        Assert.assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
+        assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
       }
       // verify 6-10 still in region queue
       for (long i = 6; i < 11; i++) {
-        Assert.assertTrue(regionqueue.getRegion().containsKey(new Long(i)));
+        assertTrue(regionqueue.getRegion().containsKey(new Long(i)));
       }
 
       // Perform 5 take operations to remove next 5-9 sequence ids
@@ -1094,7 +1005,7 @@ public class HARQAddOperationJUnitTest
       assertEquals(9, regionqueue.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)));
       // verify that sequence ids 1-10 all are removed from the RQ
       for (long i = 1; i < 11; i++) {
-        Assert.assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
+        assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
       }
 
       // wait until expiry thread has run once
@@ -1120,11 +1031,11 @@ public class HARQAddOperationJUnitTest
       assertNull(regionqueue.getRegion().get(tID));
     }
     catch (Exception e) {
-      fail("Exception occured in test due to " + e);
+      throw new AssertionError("Exception occurred in test due to", e);
     }
   }
 
-  /**
+  /*
    * This test does the following:<br>
    * 1)Create a blocking HARegionQueue<br>
    * 2)Add some events to the queue with same ThreadIdentifier<br>
@@ -1136,18 +1047,13 @@ public class HARQAddOperationJUnitTest
    * 7)Verify that the size of wrapper-map is 1 as all events had same ThreadId<br>
    * 8)Verify that the sequenceId against the ThreadId in the wrapper-map is
    * same as that of the last event taken<br>
-   * 
-   * @throws Exception -
-   *           thrown if any exception occurs in test execution
    */
   
   /**
    * Behaviour of take() has been changed for relaible messaging feature. Region queue take()
    * operation will no longer add to the Dispatch Message Map. Hence disabling the test - SUYOG
-  */
- 
-  public void _testDispatchedMsgsMapUpdateOnTakes() throws Exception
-  {
+   */
+  public void _testDispatchedMsgsMapUpdateOnTakes() throws Exception {
     this.logWriter
         .info("HARQAddOperationJUnitTest : testDispatchedEventsMapUpdateOnTakes BEGIN");
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/497208f2/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionJUnitTest.java
index ffb0c02..060eb9b 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionJUnitTest.java
@@ -16,13 +16,10 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 
-import junit.framework.Assert;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -54,19 +51,20 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 /**
  * Test verifies the properties of a HARegion which allows localPuts and
  * localDestroys on a MirroredRegion
- * 
- *  
  */
 @Category(IntegrationTest.class)
-public class HARegionJUnitTest
-{
+public class HARegionJUnitTest {
+
+  /**
+   * cache
+   */
+  private Cache cache = null;
 
   /**
    * create the cache
    */
   @Before
-  public void setUp() throws Exception
-  {
+  public void setUp() throws Exception {
     cache = createCache();
   }
 
@@ -74,49 +72,21 @@ public class HARegionJUnitTest
    * close the cache in tear down
    */
   @After
-  public void tearDown() throws Exception
-  {
+  public void tearDown() throws Exception {
     cache.close();
   }
 
   /**
-   * cache
-   */
-  private Cache cache = null;
-
-  /**
-   * 
    * create the cache
-   * 
-   * @throws TimeoutException
-   * @throws CacheWriterException
-   * @throws GatewayException
-   * @throws CacheExistsException
-   * @throws RegionExistsException
    */
-  private Cache createCache() throws TimeoutException, CacheWriterException,
-       GatewayException, CacheExistsException,
-      RegionExistsException
-  {
+  private Cache createCache() throws TimeoutException, CacheWriterException, GatewayException, CacheExistsException, RegionExistsException {
     return new CacheFactory().set("mcast-port",  "0").create();
   }
 
   /**
    * create the HARegion
-   * 
-   * @throws TimeoutException
-   * @throws CacheWriterException
-   * @throws GatewayException
-   * @throws CacheExistsException
-   * @throws RegionExistsException
-   * @throws IOException
-   * @throws ClassNotFoundException
    */
-  private Region createHARegion() throws TimeoutException,
-      CacheWriterException,  GatewayException,
-      CacheExistsException, RegionExistsException, IOException,
-      ClassNotFoundException
-  {
+  private Region createHARegion() throws TimeoutException, CacheWriterException,  GatewayException, CacheExistsException, RegionExistsException, IOException, ClassNotFoundException {
     AttributesFactory factory = new AttributesFactory();
     factory.setDataPolicy(DataPolicy.REPLICATE);
     factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -125,8 +95,8 @@ public class HARegionJUnitTest
     factory.setStatisticsEnabled(true);
     ;
     factory.setCacheListener(new CacheListenerAdapter() {
-      public void afterInvalidate(EntryEvent event)
-      {
+      @Override
+      public void afterInvalidate(EntryEvent event) {
       }
     });
     RegionAttributes ra = factory.create();
@@ -134,88 +104,56 @@ public class HARegionJUnitTest
         null, ra);
     region.getAttributesMutator().setEntryTimeToLive(ea);
     return region;
-
   }
 
   /**
    * test no exception being thrown while creating an HARegion
-   *  
    */
   @Test
-  public void testRegionCreation()
-  {
-    try {
-      createHARegion();
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed due to " + e);
-    }
+  public void testRegionCreation() throws Exception {
+    createHARegion();
   }
 
   /**
    * test no exception being thrown while put is being done on an HARegion
-   *  
    */
   @Test
-  public void testPut()
-  {
-    try {
-      Region region = createHARegion();
-      region.put("key1", "value1");
-      Assert.assertEquals(region.get("key1"), "value1");
-    }
-    catch (Exception e) {
-      fail("put failed due to " + e);
-    }
+  public void testPut() throws Exception {
+    Region region = createHARegion();
+    region.put("key1", "value1");
+    assertEquals(region.get("key1"), "value1");
   }
 
   /**
    * test no exception being thrown while doing a localDestroy on a HARegion
-   *  
    */
   @Test
-  public void testLocalDestroy()
-  {
-    try {
-      Region region = createHARegion();
-      region.put("key1", "value1");
-      region.localDestroy("key1");
-      Assert.assertEquals(region.get("key1"), null);
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail("put failed due to " + e);
-    }
+  public void testLocalDestroy() throws Exception {
+    Region region = createHARegion();
+    region.put("key1", "value1");
+    region.localDestroy("key1");
+    assertEquals(region.get("key1"), null);
   }
+
   /**
    * Test to verify event id exists when evict destroy happens.
-   * 
    */
   @Test
-  public void testEventIdSetForEvictDestroy()
-  { 
-    try{
-      AttributesFactory factory = new AttributesFactory();    
-      
-      factory.setCacheListener(new CacheListenerAdapter(){        
-        public void afterDestroy(EntryEvent event){          
-          assertTrue("eventId has not been set for "+ event, ((EntryEventImpl)event).getEventId() != null);          
-        }
-       });
-      
-      EvictionAttributes evAttr = EvictionAttributes.createLRUEntryAttributes(1,EvictionAction.LOCAL_DESTROY);
-      factory.setEvictionAttributes(evAttr);   
-            
-      RegionAttributes attrs = factory.createRegionAttributes();
-      Region region = cache.createVMRegion("TEST_REGION", attrs);
-      region.put("key1", "value1");
-      region.put("key2", "value2");
-    }
-    catch (Exception e) {      
-    }
-    
-    
-  }
+  public void testEventIdSetForEvictDestroy() throws Exception {
+    AttributesFactory factory = new AttributesFactory();
 
+    factory.setCacheListener(new CacheListenerAdapter(){
+      public void afterDestroy(EntryEvent event){
+        assertTrue("eventId has not been set for "+ event, ((EntryEventImpl)event).getEventId() != null);
+      }
+     });
+
+    EvictionAttributes evAttr = EvictionAttributes.createLRUEntryAttributes(1,EvictionAction.LOCAL_DESTROY);
+    factory.setEvictionAttributes(evAttr);
+
+    RegionAttributes attrs = factory.createRegionAttributes();
+    Region region = cache.createVMRegion("TEST_REGION", attrs);
+    region.put("key1", "value1");
+    region.put("key2", "value2");
+  }
 }